[Binary artifact: tar archive of Zuul CI job output — var/home/core/zuul-output/ and var/home/core/zuul-output/logs/ (owner: core), containing the gzip-compressed file kubelet.log.gz (kubelet.log). The compressed payload is not recoverable as text.]
?%+(ospR)O(ཾ6(b%u2YF`@2O!0Es:gRcFpa{םE't>yW n ZlBthtHp;I:_Âti?;Mx>tu~|f~R?dJ&p|qy8(D)MS[ X~sb#R ç+8+ ә(U MZgedf7ϯ'M`hl ̅^\Yg+;g(DzyK cPH~$onR?̣[ CG UL'߻z4{BvTVZMnԚͧR\s($ }9W̒Ewp6(X)%bEp}Y_\w=}y~˷ ؞p+0Nts~7?754W M[gh' Ƹ59qo0鸅Buӓ J?d^}}/7[eZ)?4;3A+&+| d~Q^27颀KPQ:/E _Ā:[?[ >j^X#-ߖ֖l7x"H cD1Г$ݠa}GbYd(l(=šyyA>msQL. ).Ȥ@v^&󈐏Ԑۭc瞎N+48\n6Ľ}D5&= fZljaJx S6RT^{L>$r,_2;Ab9R <@#Ywmޡ;Hb<ED0hKBЊ@xAOT!̭W Bu4D{S0 U"b^b689viIEZr;P kk ":ͩМonb1T)b+t@Hm <4u :B3~y7ߝlV%ɤ6dARF|<1KPM|(":Y5,regZ֗{?ү uKpQ9]? u9U{5/N}Su?w.(+V$u_#DgzT?d~r䪛b 1VoXה*Sk6j~3K"Ubt>7 RVXdpn$9CU"q,nKbkZ-K ].0D'KQd*8 [cq`bET7{0bEcD2Y+CJ?.5eDDূFQ4E 2&.؋8kZr8+^7o]~즄 Zau7)ּN2<rmHQ\@2˰ 2V~3b0ln/6Vm*Qq3(8. Y%#DcvQ)<YA:^N m:i(׷-,xr,i"y|Jn7Q|T|}\YonڨmvYNWdG0@vuV-1.gvtn2Wta'l̮W).e7v?[/^T,v4R.*[Ƣc\g8kcbҹVpbA12}"6:}0 :#.High՞4#,RFXp46TvPZf#ϤH}Z$>_x+eE8[q4SS=L`hKѾh-Vx5S]+m5ӧ XM]%5vR]%h [vdLs"5r![zMP9T3:NB*778G:p5\̥*%=b2n{æ\ N*l:Ai˦ V 1cWAQ>>/4QBKxY||dzB(a]ib\X)z) a.\ 69 >bAQ^ wd3iR8hُ矎 ѳp~q| +$bTEbK`B3cpEv{% Uc`RY9'Z{w E(7^uŪ:>z޺=;xWE 36MWrz),* ~1P76mő#>7஄%F8YΌy<r0O k|99^ydR)/KѣXB)MWJiɬH2H AHRVat2LwZ D佖豉hj4BB_l 5#'ёCH]TSյmo)Qjoc V-Dڵە p*TEmgwt=9zp9.s~%[ȕRb*EߑL:Q:q13kk:Sl{XNmV_u,:9'غ~Fϳww>yj|+ޮyzb3'KǷ17YMjnKc/yȫ/(־01a(D G~\/c%iOat vRa{n O԰sM|gl/sO4oH"PMT_H0sŃLĈiA O)cD+THYJdQg>z]:r\[mdy'?gNjϸR!8NSUHj$ s#e^ $RȀ#[ Ҵe Yl)2V@ΧBqd8M2#Gٻ6$WIr x/6{K _%)R&%aU )RH)i X8ꞪꧪjJ$ mc\kaC~_fWѬ;ۏ)ʧ+O+ƀ -*!15hUA&A-d0or2O3^t^<7Ъ"{=佣A\K  *p j*zS?2(ѕ 8w9+-B[P>i&~gHIY'!XJPK3/-.B Fԝ@Qinn'p9R %#B&Csz5^MLEuCVحawu|Stula?+2@j!I]E4QmXalrz#Of? tr2QE~˪p&ӓ8&tQ ֙3kih ܑ٩% QޝwmaYbǪ]$131vtzl''śElx4;h7MQKk ,ḠCL~@Q]?˚1rp t6ͩv5  a,YeW: yIpi c'hnʔ&o:>gw?kzO~rrޱ&c-ޔpMWoXb-a$\iW6/m33tt-fӽ:h 5{z4kؤJv^l7ĝyaD0{7}/)cչ\cN(4RYrBHZ=1hq*AwwGqG,|dF(!9cHx0np,D:((zpWܡNf[YT+g7rRbk|yc#M<;ʦ;Ed>f$ h ɺ:OXid 9ϓ#R/EEo$fL;:ÞY7/Z| ]s͎Jm; C{4%ºA?UJЎsϫ(sP UFZqFBe NJh(sG膗bi7{͢ L)eZx]ІHBb9Z}(It`Ad2h(Djv}gϠ1ti]mtHݸjG**,܊2{Lu(hDUMyw_rqSeH j賨-7yA ;/ @C8G WB*P<0VwRy_n2D~hW+ÛxwM6y9xe6wg#dzU^&?\ȫWk-z4!)1N;m9^_G5bBmպn}_0$W#>+gsђW+@de&BsC[wF=}6쇃F풜gϒQ+%i(gs1HɞWu౪_Ȏc8Z~w@\ّRd@$WNQ\pPYTIB*Hb)v*ʻwwb[{/Zʟ&AVѤ< TŚ$GmEI>z j#Ⱦ6NLܭ4fkDү\G,զY20&#=cGwT t$&9]_#p<ĪEu@_P~JQ}@;fog(ӊ])ÎϫC-W  No\g;*H*;o ug>{h+,66&jDŽVAB9c:Ibs )P6ӓd"I 0@KrSS"E|$ШA$!D Ԯ#8?X]9gLq<J;:YbΪ*;#thU6M"҈[h\ՌQ.ƾߍIƌ$$G=SdIBy;B˩e^Hr͗8.d;FhC me Π#w -o0O}&W>SkԾ7}j=Bg1 I2VjW JHm1;:{~V!b$9Qp2E1 C̊F(4` e&,DHJ%ܬCC/7\NfkYsmIƩ* HaXm"YVFNL u$b]*ԫ+ *R%ϕ{TnQ7.*{"p:J&(.i'#wWylxm2ў䮅uυ,P԰YIւgDKk hѣ9$G)Pڞw; nㆠ'B#0Q>$d!FD ^f'fl^*H+8as"-eΪĂ,d@^ /N:2&5IGUKؼ^j--\?RV{\ p@~@{.KgH0Y{b8 Rw7~x0͟Wם.++k͋u1? ԀFs { .`#%lO`vj L^%#o:lD9tByt\O"-  Cr}|M6Ćv0G ֪]%\翵ϫuׇg'UjmX9܄Ο~}b@ l2q8- 0R5pa2As{>_kw?+ޜ]g׉h1'pFãfe\%G2Rc;^,(tGbH']aaY=aIp>d6LƣB. n&sTJnu>ɮQ;BR5Cdg  &M&-V?5]:*'~o67~xz?㻷?.|~/߼{Ƿ?7|+p}.E{8 x\#3/K& 8 +cs64 |\eOBS`@ZeF!evKo'}תSL)E8I q7ZED3K`ˈc.2h0ŽGݹKMz)"@3`~*j e(9yA^^>r7!rYu6O/wm5L_^xFRL'%+ ʩj } #rK&{L.HX B@֒T"*aJsOG1wA#<&Yjh@\43<\\>ɲ@Dp>s+$RaXbAגqA.'X\C&SK~]JtRB%:DZWLS|xuGxՀZܳfQǮ&3q;(|G% PE½TN8S%*:ufGNz}TN)tTQr%LNV>X -VL!L'47`hR;( T`]vzh!7x1XKjCɟe[`s,,Q)H(JB2Q43ºeApep[JzMy9hM* ]iPm`ot0f{& 0{xքTQGS -GUW\&Q9.JYlfKXVi`"n&X0\XSr@gƸHʖWaBeLX@N%eL Os)h})KTҍs;`C ]hgb͏ tf*'>"#*2b|sGJN"qѢX̛3䦣xV}EkDEB8SD*Ƙ5AgHUngSs#gHo=Hz Z5'!&SwIt`s F8P!R!ʄCt3(%<M.6ai]q]l{#%rF;N7rmuQnߍJgӛ:_ǽpx[&Cp*fs:S}#*WKJ_%\B*BIQIc"4 g"ENJ=D㣏I[ M19%vv*QhhN_Z"OlmVj ZPv{cїR7lzf.Mmo8X-#ԸJXj!^ǭH/#>RWwm 5Ri(09U_}էʕQ)-mIbx"}PWN_cX]jŷY+NFXc7Kg}o:TmחZwb۞S`煰O9Tst!/p!e^͚u1*fCd\ڄeo*CV?'_fBIyF %=h>Y:%Y"bJB:9!:Di#(*eǭ dK ].:+9=1Um%U3 tt4qL(gMK<>7- $"i.3$Ac *QĪV. 
TH!Q^dշ(j?dT8ITZ9dZȎ:8j/)hiv&Lݱk@ՊN>+9V Z7黾@1r@Ί&kQ -S[ L{Ͷ|~*'GE=ih:U(a5B%_NykƉ*kUq5&e9*2gkchs*Z!Bwvp`:QHsq4qv#x\e4,2B?23>)%jciviev/.۫ˏ_##2=Q\M3}ɕ])H0V9UB&дU[WQ_yw Q 5d/d)TRd8Vl+r;|O*MMdاg7bۛb>:Dm~g>3ءE쐉ꠑ|,$4)^Z#'M89jk412dF,:b5X(]2 5p!Z".6Nx4qvaԷ&hV hq("ȈHgD<#DkLd0T'Dĵ2d6T|bx0͊|5T,kÙcr\L1dDHȤ%[,qjnl08납Yt9<2.3.q1%[rd0m]CJŠut@r"]g\| \|.x8uzd<PvSy 0:fX\kat*]]TpvƏ~葍ՏJEqI5DZkP6mPQJ7vٿz{Q ׵pa#y=jok¬<6>yxY S!;}"vdn z)ӎn(Q:pΩ]DD;$|0L>1%9lS)E'Cv$A~)9Iծ2-mc8C сw3X|#k}>uiwmgi)GVP}8,%L}&RI}}&Qp_`}8ɤٞ\59FS&-WMJp%•3(MPۛf"CW~M OVȋ9uF[9ΨJ:$w% zn.Fv1Y$)D]r܅E/gL{}L\5P:C2j;^t|&TEw{_<_MGt -B8Oֿ߅{>g죏%eVb2ܵ=-f8!6RVJMj[&?~ -v)|nBf |\_ ۛUa{[}J]^&2.zr1kmZS9*_!3#/&y<BĮAX[qܠUog&F ?/n:;F$]v?zȿw/u-vvRg2j #۫ܵ9kjE|_̫ i=\ X>O?4t>yo[Onr?%'CʿZr3dRO8n Y /Z?mQ@CBpN?>Uk: ɤY:IsOq)۸SA7`e{FO 5O5_Dd~{ka-0g)ٱVsA%vx0U3E*ֆ 3'ӻ1y<F9nђlۧWf7s~Ք {} fI}VQ?;}@2)KE}R6PEcևF8'V{ F־ե+cY7q̥'emYbQ?R^cz4eXDhU#%kc_CB^}~sxYZ-hYo6(rᯝS}wDpp<?c7Ie(D-6xBtu5sqƷ礷QDttpn[Ck7+lyzlzd^b5q8XV _b5)ɟ_b}/olez`ׇoOdnreMbfYlvr{S>ltT6?͸VCYU*Xvp7?uf1qSٞOR0Eb2{wo&.ʵ@ՑоYo3{)>ZޠkyRJLœiu07\M^Q|'(彖&Z ! M`gO&ņ$䩤MZI=nRnf=_Nm498r_cЖ祖-]L(Xaj1a<B qv4CWwtGd5؅p/H eG[=P,I L7q% L7i~0ݤd>V lNU6qh>k R"8՗W:rEzţ/ڬ],U^XG 3n%+xַϭF7#Z%w1iffMk7r^l} BxNIs~3Z=xP%\&)M?^uFXm_%pb43Lq5Sۚ,cKT$Cft(m-y?z?|@8jkQu>j>Hc2W0#D_x#[W9ɮBƩ4eD^]BQJFWDA1>rζ'/;]ݲAnY>H6}bZJ\$M.`d TM\\rdN.QSZbR0e6rcz~Ce %B I)P6ĘeJV ['F+ _` )T&]ه&̶TacΥ'X2TF\r˜C!rPZnc35/P鄋|)bN O̅$jPc*x J :5v (Ր-ժuʭ Z YV] 2itu!E=|`!=lih83Ji Yu2Q1Zsl'Q) ִ!&3{ZtClaG)Lc7 ]-P,IfXARSr"X\L *ZW$b;8Jm o"кh/<aS]TpօD2id}GA#d¿Im(qsPj^Ȋ+C Z 8' BI 1ۢ*D5 W= %g"z۬/ɻ10y O )GXG&&H-^)01R,PK[KjB0t1H-I ^dDԨq1cU"lR9C5TeO$!( fX0! +@]Q5KD !6^9RE@$EjՐN SXE M%X/`tT-?uTUӒPvT3p`8`D*0XT 9[U(oRSQ`["(k0h{uC,Z&X@ri+w%Aq%cT*@5Ȕ5}#V YNUg%TJX*1+M,8A2`&!M(} p%S1mo@NUL+M)3b3.WR̈i <o"11[Va3AM|00'[ L6%\~"ؙf؀,Q q+9nLW2/ ɴddܛKHSΨD\EVSMhckDSX\= /G*Bh4H5 )c @ e46A a+"ƛpՕb1F Lg; Me/Rڵ#RdۊYeZQ}"5jw"B"L?14[+՛v^t}z¯\p 29Voܦ:h-b*d@X8pie Pdn)тҥ`ruARnY L͆HyA3 vXXm|" cA1J$bBW&x?{ȑ]0(1?l6k6# )TH$%ME {0fvιU* U t~ ZLm( AJt+Sx bd06]ƂA,TǨ/X?ON_EiVp6SRe%)8 dxvc0[>2{q0N‚.&jH-Vb =N`  \ZAyH!*f7]H_GWT@5ʪ+x  B{ RXG[8:&oCUQXÎnqz5|< hw6Q o,pZnkT8@ZiE \ۍh#:& gBi4@?x2ZF"v7H-28\ɂ0CAasWc;gQ0ut",U@k6֜ß?HeUHYtgi&0 ֨H `f)4ͫi*m-=Xu^a@̤I@ |^3~M:(2 cmbHu`&n7=Hc]y^٭N0:ǣ7VELn,ԵEW@7]pd3 L[ vh ,EaoO 5$Z\@ǐUFCon0?oC{o>(5k5p7%<%FM:3Fs\@7<"d5H%FF˄ A(P`AH^GIO p'r5loQaݲIY!|_(W؊qE`1\ƒr\ \1[B|;!" ^Z0S! 
!-JFJQ5 }ԳPuHc-й `AE ѳdVl,2!r#SۑZXz@pkOLA I d:USEce oY#9akP*`jho:k/5s%z56R zx A,gfFZV2Za5!WX7)1Fd%q<{ ̠!GXe(1tqƛTV/:^H`o7jgC1IXUW.u/F,Ҭl*݄H"r SW;LgAS@$Ҳ):KckT];g; P G~W[ݤNkqtzqآ(P*>`,^X/u߂^v0+HBɕڄA_ ޾Eom~'[pE^Wj\m+QJA72e@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ2Y@4%iUWRV*:M*L R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%[Va[@ף§%>@m]-*< '%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R*Ʉ~MJ HSy_*vJ X9)ނW|՟OOkMx~{f斸{ξ}wrf'rf2Z?˯kWs!~?ǯ(ŵI0*(`#1WB5՞cܻxX<=ˣ?_}$3kݶ=8wH"G<6Ee?.^3[ ͋"J0cXR*,e .usV ?_.JBi,߼;'B] 3:ْ7[͆$o$y#IHF7$o$y#IHF7$o$y#IHF7$o$y#IHF7$o$y#IHF7$o$y#IHF#y^ɛM-5mO-//S)yXwדPVE = q(J^CW5+![T6۵~/6pS뭆>skM H*8?B aӴ̭tmr&h?Kޯzv~Lg%_a&[I&O<0m]oh<^Ҿ_PGDGΎ']&~&X **VY4vG;l<gS$(tu)x%&h]@`/*ԶěCR0[ىbVͿ_^/lN^zA"7|G6ŐZinPŻ~<4_+ [zR|;_rnA?_ __lf٪{>˫}}?`$''g45ʸ୭[kr~g*˽α1¶Cd^|@5l?Ʃon/w!!w>9UX{WFYiJI /`̲`^WaQYuյ)ZL/RF<#4_ݘ%H E% },y)?ƷdH^cTIP ښ8N^8sNZ\[7Z4(~n];%hfe"rtM۴@xm)uaHwzZl!CCz(ٍ~qc'9^"o.\}344Xxw{NIWE}΂ Ujhb%Pw>C͢hGgyQwtޕι>ܭ2Z,}: |V_&wca-Y1Y%]M*G?^M//:ե]bolzqu"d=Zțmo[RmZfpb?wnÌQ{ e~ÍMWO2,Ta{Fm=2}azuE=۸{m2{ܓ.r6CaMJ('%?8YŁq˳0PiޟȺǺW(-j/[8V>oW;ƹC/qdh:c,%Q@eQEd]r ?Ǿ9 Z͉2YLުvĔR6DHzjULEC5 ̜G+X`|>Fg}9nGizyc.˷Tp#S^Ts_Bf0~~|˫;R`bN+uЀZI*/ՆvO,kgz771fwuv71۹ly0NwfGtR;pb:(ލnbk.yNc=L;5<7T^1SCc̍ qs4ws-Dl;=brwEȪn5>r)Qmǝh|Ju;wr蠙-쓇֋3[5}w,ًͭvd v6΄t$yeפ9;>U5<w H3g?^/=@mU?fc]d/a>mryq؁ۥUӏYgފ*gDNGxDϵsOV\1 +CF<` f# S!!Ez㑒IK6IR+2Dz(NG]#/,W&chrjdvƞH9vaYc4$)| 4 ,Rpi$a :XM휕 sccڢ9B~/koܷ"[}V(Q9ʁֺqrOlWV{ۡAJ8QU8QPOxOVni[]g%}^?[xEPNr[KK4ڪ)A:B6[e jLdeo}Sag 1wRhkb$ZJXC;9$VjSR$CHO_ S>49 kV˚TɶC^U@nJ'l#MDW-2bx\ԁfL8%楃1oΩIƒ*w2lCN 2 S&C?oԎ^r{ZuYZfer},4Y9VJW}|oߩy\̓L}8Nc@Zytq%;>mGs|^. i⸜Qm3BFc%8&f]y\ˬ\o!k(trk2v êw\Q8ߺZr{tTz\wmXg]6"lmtO,9`^VIddE,.U%/Of?*A5QJtnα[&hp"՞UAtk%,!ap~K*aC1ףE}{r3Ÿ˛Yrv_. sevz۹Gۑq~I@޻84vCC%[jj47ec3~Q0u 9@o\uFK:mlN:jC_aͧS^s̐x}3Ec\;(V=*@'˅\_:ߞ:ۗޞ=?/a F`, M"Es#@{M7qITUV9{=,gڽ?},VmMWjQ˞3cU'nL;t%ORe8ã;QI;RQ:"ܕ DXd Oj 5cu"vD/"+vԮ!ƈ$b`'I`a~ҔGbYd(lk(=aye5}mߋ$QL! ).d@v:9&󈐏̐Κ*K'*4MTidM;O#dy^0& ~(ɞTfYS8S KqGQ%)V(EDibyںCϵm}ݡP@<` `LOGzSC4^=f%:=V), l=3aئHփ½u)\̌Мb6qV9K}9fu.pa?w *?v~o+o䨼i'0i#1-__;)_M{yz:zWW^ (k[WL GQAJ1(STsq*=G_:nB:촨gMxƢԑ1Mʔ<_ZfhrΕwױMTary\~U]Ц˚7ޛЄZpnt~X/Ө׊Ho;-_{9:9ZبKROԃ/x: h>>J]Q-T%y|HR?.a`70 :ݙi;g?wgՋ|D/tVǂ z\Tbf kSL5l4̲&<1i_}hzt'cQ)ӂ.TɂĔ#"ᷠo:fZ۶l qA'F`mdň S<[Y$D%eKo}s`I$I)8)}ۛv߳U|8o"5mtaPk-qXh cS-I)<7OÔBTᤓ[Xg 8 Yli淭kg7zk 1hSΡq.1X9\wc;cJȗZrV:Xp zsekk4l_e~r@RMJ V[N5*Cz)FVL\@S$2*561VFNr0޻oe(#t}6~|_| b UqQ9XMӫ^K!*ɾ(yfz͑ĦΜɝkxHK7uӐTflzJ,sDjRRQ*SJ5>< yщWI€W\"E#M Gտ/TGof&PtUm3޺o0=>2ĕ9f- l$%Ƴq̋J2(HZ%t#+Yqſ+qtZ_>( |V ؟H\[4CǷ{%O=aeõەw"r?5xST}EOW\tz&/jr. ͠7(V^I3m{n=m,OEMN&;^?z)T~ pөd ^(%cMʫ *+$`aK#,894N*že5b[R]K@ĉ #Ix" 5L1U`f%Vꘒ &cy4F,PT9F$㑅H>RSFD<0)fCʘx1l⬸Uo`+amb c=]+p*Y̦4,V ۵+SNJS,(*׆D-- #Qtʲsk f NYx'$dՄdy668=:|Z"6oTն#WU)v]%{sKu Rݎܾ2UScd`'0.oͺj. } &wK m:4+ ՗Vl=ZٚR2a^!X *6xΨjgϡNjy5h*FZ8[[f'& Ǵ.LX‡n2UaR[C㺼ESc\bҥVpba2ֲyuYt<$dURT zƌƁZ0cĜ3WL#,8ZI}aVf=[!d))> f^]ߗ͒V NPoR&u}O~Prerڕ_rlZUjT ʔV0 bRn `D]@dcS[ot41!`vR8B%DeMCޗojHOdcLX] 7V1GΞ cqU|S5.-;; }vԹ% %eJ%7iQh L/Y[JRR"ELK+LOu:UjMwH I&+%cB)7j<:2,"1rbrX+?"D4&Ô. =:K]Zry@ՖI5<`}vG,h"RQ5ɿ!餮n:C\ @uC`J#RFn]DFrMGAGAQԧ7q Qax^9@(Ñ#2# b"Xp[BsDo c25LA͙+YL'd usuѷ>!!G ,KaxDܧKY!r! 
.Vzմ>UgJZ׃]u I2ݢYs47j'σ7!/=&0f(l:g'b54h(*ĠYgӘG0ƌAc,6ʌ ; Q1$- NP"FLMgB~|v~=o?={`kb2,e#˒ɮ ︰N )?&%d ]|}o[X~W0l[a%tluX^W#M2%PU2.Le$j ת/D~L;AޑȩDdCDI\*pi ΍MjA< yiVY`B<UX0;cƌL"^ˈiD0m il=ggERާkWK;S֜ #E.J+G a 89‘ٴ+`Kic -,׈NrEwo> $rro%r{v.qbPM+: JA^onLX(z2%r|'%?vC]eVVT~8,3j#I)K@[,4QTk- EE!'YQ(=4cQ*'brK%` `̙R18#c> iƺX3cjpX8(mtqñ)_`c߆n]< v_#6Ĥ$ {(IIq@Ӣ^S攆lSS !{D%$ؤB:`^e)'Et!"'UUymgL}Alڱ&jckQeMN8冃@m3FЂz&?rBP0c:_Y1 D hG%g!QNq{Na&xXc0 "fӏugFD"b4^T'CGԂJ;# "*,F0E$JqrA;Y15d`(QHLd$&:łK "8 H\0"~ -O}ufӒuqeEb4/c{E(Sn Rk8`gY-a&u$Kma6Xyf<\3g{ 6 [(\Ukr^+$ڻIֆ?8au0նJRpdgW2]%59q̩{WIZBp S,zWI\j*Ie W8C$8B P*IUpJ08 Jky0p%EH \%iTp: gG\|(CcU.gg3* Z3G:IIX)?#)޿;GF /g^b_+O ?Z!;cA`tW LK&ж7t +:cu鵓d8W6aRKX |JL]sÔ2I}9?T*Æ:'1̅+dP, "Jc;|/A+\ tm \kkMp c!JrRp3ܕdڊD rȝf8ߧ6T6.;$zѕE|\}ˏpb,HNJg{`HZ!iomq6-_1K.k)3΅$.LD6.ii3k* 8'ѼmIٖw=ݣEpynFasʼO|QY6?Xq(ǹ׫S}ie=dR\搵V BU% g]<^J nTo@?VÜqo]:UckYu?"Vewg=iq3]V\2$'VwCe8R[F Xf2S^Hn~5T'-_uՕy{6K}o=ewqYn߸*ۧEl^W]|׹qY/vŅ9Cy-׫7u$|t9Tbe V1)0h sCђI>uyaBGng.;y8 n|0gnC&z΄S<3s)mrr ,KfAGI8g9!Z4YJ$<&79kgÓ~XܒUG2JF%iYVt|{.і>zDςgDʩI$1 :]4SYhة0"Ppȅ`蠷2X!xIBP(gȺ 8ctU:&I\R*GFK({) 9Ѕ%VG2+ӊn҅? 8RWƆmsk3]bš}v*ҩ[h\ՌQXjM2d& DB T0Fm2 iBX܃2E-ƲC~!k_mݿhC[e#gnv/rߏ;П^ 8#K&wP d#%glRrxbEӳKq:YungO>0:u&#֊Rstt cmoiDӻ{H_u‹EpvM.̉9Y.gvQD6(~0<z2=Zi[7&nY aQ4eJ>\&z|g?ap^ zm+ h9ԽA IHXs8OǟI{^}6'9hR'~o4ͽ?;&vo?9zW=zGo^?Uo40췑 7'׍׺OwxW]Su]z:k!{]|HYfI_eΊջ?er$_@WQ׃K$Dmq^JEOSB,!6p(YN) ]YQU9 O0;I۾әzNgHP>Ϫ$V^VA _i΄Щ+@]ΝG=RsČ6 xIFw^А3̐p&#p9I&H҉ᇭJaYzrLٞJӇpcz4>[sMȉ$dnsQ{2$ 䜩iFr勏V:Ɯ;Dx 0q4MaڒuKU+JX4vg<},48[7M 4B)M B5R,PT YQPEZpB'9asj{o8m,}vg$mrnuɐ%dsȸYhHpK$lurPB s,,msiVm7<ߎq'V{'âS1zI'w{o$z{aw`qNPh_ԷQ\?rO@b`^P;9tǓn=ޗ~9C-xu ZXFԍKR=r\I{T:Jsjr[WVA]zu9# 9 "sUPR"Da/S2EW),r%a+u{ ;2 Is LZI笑9pֆ8^}Mz"hW8j>7^-o қN"[cAGzBK';z'ԅI#}* VWTsf yg|`wNoImVe yQܦ U*gd286J`ʹo4̃G& aB+٨aJa׃rec'ʟO{Cf90B3 m^Ԕ B Ϝ48VX4[L!h)pn:r;s%xNBf|凗QfQiإ7'$5eI iA^Hș[K{ɶԮ f](k$׺VIe[Jj&y>-622Z7L{?7o?}Ӗ_xY5k5 h0n!u{FU7Fzu&vj8藍t#n] GsR~ə=EW0?h`pquO"OU)i1 @i_t0u;jvŷʁZ`)a:! 12aM΢"fVʇriʘ6OAZea @ )@.2KəeNg2mw*hfXz5_7-2{St|O$0pF *[lBȥBV3ŠQL8QjpM][I?+^~JE!!*c jg.QՈhU?uM]֝e$Oxlx$,E}%=+݉WĝxrRa!#mq#eJ=CU޺.bm\.<%(!)9Njk PK(8-ALGӮ>2b#MLrCAEv>2IIqEM,ծՑ彲Mb0 3 j-$& fh/#vHzR`ZD"rReFlGl/;Dmv_ڭ+D`y0(R:zi*PQl^<)Y1 H hGB12XS!d04]X+~/ b6qfD-"`EK;# "*,yH<(E f(o xm8XG!1pz ",)&LR#1s)YM yò}:%"ˌWq ^Rǰ VQ&|,p칇gY-A@u$K]a6yfI`w'Ղ:0I88#!0)#d"FI* Gp *,80eZ 4"VL .}f9~&'cI+[q(6,S&ַ.~Ir\Gב㳶GbT1*&>F)|MgaT5q9Aϔ:RjKf2z7;˜l5gU$o:JyՖ%nmZ.o ۵UoWXfjZ4-;M vnM;K$h͒:KE4. )Vj9y{z9+,vQjP)/KU !R$8$"Dި6]^!{pC7@36z޼|3"W6Ko"Ť-o%ƟZ֫t:m\saWf!,$snWMgw^'ޠF1,o:+1oϺ5f\v\Ƽ1BYs2IkVOgm&&I*˭͟6>ƽnCOUkDQv10pe4ؔ,XeZDiL`B-uw:{D~RAȮNiLx&@f$R" <JGu`&bĴ =xo<3'ZekLEMi(vGciiB`^>iZk_^T,'q˃Ҏ_m {L.3TjoӔ@6Id\H2-w<*xvk6n', }ijcq5N" ȑ7'.%N%>t+pNPc! wK9Xch6.7r6r6ӷ޶;'ˏҒ AwRw{Z?O_NQ:_bPݛ%ޔgDx51hLXBISo' $uݸN[-'FzOɘ ߧQ >HFx0*{C沕lR?;X[مybnP2{)E- Dfҗrx.칛^<#HH`rAҌ#FBpG${$&22oa0MrpU;)r=h3cp`;VC wJnbc͓7?AwEIn0;m`y,p1)!v*oè'GGnq7hm"9wZ<\tcopÓhҗqƷdv7.'!+wPŬ]{c:s}A78DNU'˳ᏱR37qHcP<A-4C'+Ƭjk{ Tj6WY %6zd̿mJ= .Kg?^֑\룩#uQr?_փ⋆LT=re.5oKel+ xw31z8zI?V'?*Bk8~פ]t'ϧ}~~}e=ՙRTUcI*~^;<~ x{}W4Ňˋ8i1\;w\]tNm 6<ק9ӾNS2xS` zƣ s=?~_5rSΨ֤I|ׯo=[_}Ý)zUzgziwzfDz~%FvnZo0:e6owooWsTXq T]R~X!j͗j~sjNAfS UhsR"1Sj+jeͨq,n{M~L|ާSBn]8 5J?q-&lu0tMS wLR:^\2An~< 6>k8f:6v CR']ܛw7Μ9Km/ꙛXd3m=9V{7ƇU"pEQKQ}.! 
gI{NwgΫ9fgaep HR.'#w8 jI&)rlU9#s-eā.0ND}.ز$s3:GI歳( 8M-C)븛ġMDI;8ќNvWd~DIاzs|W*UpT*%#OM^W0"3\_FR;'t4{=#WG~xs$è$$a{sD;nJΪ-{i>UNy*^YA$EPS!!|HB ,o'y:-&,5lP6L:- ۯ+Ew~:O^0YɧŸ|=|gt={ /(T\LQ*VF6ڻLIz7˃V?[/=\"q@p@*+ءUV}D-\@"\I \%r5:j&WVzpEf\%hW\F\;\%*o6Ec@ղCLklb?j K놥E`^'L\H'񜕊[u.8;zC4 n>wH*>wB)07~!U1|&Nkv2 SW2`K.-2lp7,;ˣ_$ \7ɅnX9h &cA"JCoƛ  2RGJ4)B5Q=N)]pz).cXd|%g 3A5oy<_=yA~.8gdb 1\ȡ`"@%nvrLlY:$#ܸ+ W(`R'/'Ζ׬CESvy/Ϛ۳{vZixU{&=^0RV ~)!Uͫbz͔/1 ,N):EX#.5}{qK}L{`:FH5w* '-L@Gu}(\D.?/D{DjWp޸ΪhU1:d0%|X|e{~x,(݀ϛ$=0Xdա3DpƣlgՔz~;/,Z6-8w4.3f`?z9ShmDz#P:,8IDӤwγ2fm N6xX1$t<peMn * o\Y7{]XT][Dw z!`npw\(wp)BSϘ8y*ɨ1gz4‚S/ woT.ОVNC;Rl%iy#hYC}\߆I{;xҏe$<Ѿ(R_bWys1@lXoRz֓=J\& cqR>/>>_t#Z}*KoUx;|v4a)~Ë)փuMZnN~гaLE>LC^uo~t\LE*DUz"ԒwV}JSѾI]'~n͒N/?#/yFc e & ¼&BP5;LjnDh:vF|IvoYԲ+MfBۚ0Odw0Ɲ)S'S& 5L%XsJ)-lu5ivc4S*nl1*NcF1ZleF1L!lrD儥$`. j,Y/ }czQ\r0`kg`^bST ,Ȳ+r@@09G'jwDn^{b50]1sNUmrA:{M,s9eDڙ6B&F4095#xYKD)`,J(6_J2dc#{YMtP+Y m;blB=) Rm #v1r6# a<.;6%eQCN]tʹ<#%RUЫĵj8:$_g1.Ea\d.v}̓*N5I|=I  jT gFs :\. ]Pca^Lg#7zk}E\@h*i:i*)Of@$NF!Re1DV>Wy%$]Ԝ. .* v g ;YP?JxRCrg\ p6R(ȭܥ ǽ2Cr1S6z1"<V&#jl 5 DHFjyKG"g%lǟ#w5v%kRK&߳tVבl lL-Mre!7{Ķqs촣юlD#dz>"p]|_@%-VU^ G|qǩHw ċ.1SX%bxPQ eϭ hKeyf@u$BweQs.IR 6 HJ Sk yJX@RlV(x5E6XS\Ώ> I7Ny(G-Ç>ɻ0:= BԠ3OBfl0jG~?5sۙ|6ekpxݪ26NSᓐKQ2Niz7470ݸgG~?O"喔:q3D;\=Ӝjn)*m:Q#ǽף>G+/e%mW@+ a}j~L98}Mˈp;\ޫyB_-, UY&j$bɞױ1*ʛ}0Dț*On:S"_8aU>TB%Y;q]uKg}/yW_*Y`)9ahJ$锴g>qځ.b#BWW*Wo[?]|}(=M3ȼB]@ߝ qQG^XrZG,@=SD*ι NH#ֺ ;I5Ho=.14A[G*Z`'/p\hrl;M4-b{9s^"^Mhtr֊N0KcRשS]#Αw<+%`x`)QB}TGHeBfAc}D U6c+ٰcobSjb%ZCpe!.GQ{lΚS^iR~m(m#Ity3bݪί zeM0_"?t@._x<{2|:C(C.ۘˡ.;m`m j+;w/bYi#eIn柳[/j'yo>̛I^}P޸>;M9> }}o韵jz8Zѥ2=l sI6/G-5Z^=[q]mme\UPTjxW65pK9bH?b6@rC=3[3F8˨v^[6ϥQ¸a1XI4I8ʆ,AJ\3u\[/, @I 4rzb t*Q]FukLG~84Yd;&]3%w uʜ"=]/x>za[VpU0Qdf(XFq Q>25u1R q(\FIѺ{%DaY)FΖ^*!I%9<"CLL$?hH DIDW#ܦ Olŗ8,4T:hrHj ꘣ZξSaF'e\7K+7 aQ\ՌQM/ۍIlp$$ )ƨ@+ )BH'0"cکYb&u ŭXʇ)tD:D)';!z)e˕ 'b.ȨRhPz|XHd>C_k>gB)v0딓qlZ|\Kwf:`r$’dS+70v!ⅹK鲼R{2(E\<fx9Y<`نgܐ9PSH&ήٹVzK8si/gYjqmQ,9$ݢ) %C/u`ڤja%nB2 (Gip<< Rǿ_?ǟ*ç?ӯ~>z`- gDEQYD5bрlMG5Դe/f/ISΙid'id(S"c3L)L}o(ayU=}geBX)$] H\#Me!RDY"TNr5:[7`cw^3U} ӎ&r1L\p0հYͩ˻zl&BtK.,NJV `Y$a/$v' c;Zt:S"IXN 7(@d N֒҉"r-{m00hh(E$)Y/;YH>btJRT&OY۔=vAI'Xj<ҺP\ Xv*?ڧj">I=U!(_8(6~Z9o"?䉺*SarPyR M?%+cOb7CAl oPOPeڹD0I%0'OdyJy$ϓr@+̋g$}C4 "杁"U֚IICWoQJ ;WE\]+0v*Rr+0VY!"5v3H[WE5ہ+!"P"< RpJZ:WE`m;WEW8 HZ~cHYWoVwm;s= 8Pj?Nh JC40Q 0^{`goLf[#gYqQq=`=Δ0BK~.#J+웂iW &ij*5{#-´a. ƺcq \iWEJ8"\Y%5v'ެ+;îF0II7Wc <9\=O`{jv,qI 'bxزx\AW=1!* \q \i_5p<\) -ȻWE`"]"¶URCWoKCp\+XMvVHJzc-%ơCpUF*J*jh;\)MϮ$\)acofѼ4l=샩G*Hf|W;XT*S}^?=|/~g@Js 8`] oPzYG|7~^_o$c._Y(7E fh'+EROFiuG|*#=b(mu?^0F2&4UtD(ƕ#< ]Kq*\YW"&v-dm[4\ hEeJ* „pp_T\̉*qf+/B Px1ѹ(P*:h\|. k n(!T. 7<.&.䂩P*WHݔV3'}ڷY.˶-*Q..I6(<e  #dMLH핔K*Ym\NɥdO F>bQ#&.2!Gw^iu gϚ;!E%-B|wj1E`U!kdȸaD,ǷxF]3%IseWIuق# Ăf@!l~& f&4rq'jT-UT_? ߾(*?]tYyeDY$mˉ{,ٜ0`m^*òfTQdTFHxV\ʘ JY" M=t \ 2Tؘ8O#cs\1,lL3B0 _ Wwfb]NLl4*7]ݎ_YIKs%ٙ6Ō&%_k 22&Ѵl!Z`,N"vi `Q` ¨:mnjēZC,h0b7&ӈql j76g͢y=j v.Dd%y8U*ZB1)J&JFosٝ Hd!hɊ4aML8 #a}|2@Fu54F2fƶ bcq("#.״i}Z'N T61gm8ɑEkLH Ƒ5ZbJ#I"p5\pG &@"190"6&/ '\V&_gcZr(.bø(z\qq512)nD\IedVL :̥Kb[18ex(UgMLg^#vbbz #_+yO=B+Qck ϬGBLj4چJYn  1WS!2Yd\Jz> rC!3hL߬P? $9(fsRf7i+"]XaX a0Fqy`裱d1xOPlTDC6*Z鹎I8o|DM4w|H- c9KaRX_[ʸY9.kI̍~سӰ#IW`GYQ#4p4 P`U% be*0rs!II]4\% herPwHtKe)}bY! X[cL{M 4,DiJ)Cj%:0x+|jJy={'cM/:Gٟm^{s"Á38z@@FI:XddJ3QeC*SY$؈:o ,e-:q$`?">gE͹B('Y:g#1"AVLɕԱg@BB#v8ŦUz|kwNζy;voGZ}mGO1OD1gu(iXuEt&ufO`YO3V7 n͸ ux?a.g /R k:mu6p[TzShppHkXzIi0*5n/x0 %PZ= :FACkdxKlvlV ]L#K@u4Mm.,lA^h4i8 veck͢k Njmo0IvS)9]YRy]ŃIƯ*6@84%_*LU>j[7>)sOߗP adIJt}[YJr;F ! 
^)0+mHe/,2R߇c8xLj쇍!)1H%VKG-OKLT8CB} ˆ^sYOu@PFDm*a$EV 1@oemα5.PNI XGEE.)r :lSyѿ5#0f(,"F\4rRcEU v`h4BkM̨3hE刊`) NP$F.97Kv]ذ׽xSL{l|$d {xuFB`tnfQsE4 is1x!9vqO#z;(Jx12HJ hR 4uI&-ԝP[2t@[32{C*}(Y} D]>2ud5'@ZϟZet&7t:o{vlBZ̶v^yΫ-z^jk٩xmx~:+Yu~CvYx1i5ǯ'۔#_/<$v aGPT ?6O%i>? +!o` shY17<jҘ:"uwHyWOj8mw_/iLx&@I3)s*IGu`&bĴ7RV-bQ'K ބAYgnj}u*Ri/k~) NkϸR!8NSUHj$ s#e^ $X0;p/!rƝb#(, }iX1Rh8 S}E y&Ng=!"}JZbiWT{)) `pC@,s@ch6٢^ka+/ҋ Xl O4uw}o7ӛ)Jj 5hxE^4yRL$9>RV&X"-:b^?)vp<=iT=yjQDx51hLb ֧9"ْԆO\5ܨΞ:l(ABj# "f10E>" 1y$xorJ^kip:t~[lnT.{gѹw2Rw.l@ ǧz__y\Ң9\YWFq1>[trT 7*dG˷߿J&12<ꍎ߽P7GgTTGs_Ω]mrߏKK`ZDž:._u1>^p6ZT 4~}`N,虝Ӭo2u/~Zn.N>|Rxۿ+S]~O p6vaAt7]bt@<`X襷6OK}+|7^Erq V,E) `924CE;A5*{5q?fh M=09+z!SV7aif7;gEX$Z.W=dc*?.ENS_\/@,G]m|fֿl "3g MBReˀZHHm(Nw(NZ>LjPFx~"jISt7b~56{ųU~z /w|{Sg'LRAPV[sOZ3%=Teթ/ UkΠm̥jmlReh%A\nTwqki]HD*JƁK½U_pPJWI]#e|1D̫)AsOgADm"w>eTZP2u4G9Ckj=;kp'S5XMJ0QU$8qDTEJWYַڃT+6^Ԗi '(FSd8$E% `xd8yvXQ©[iϱ3}hײң{+9HTX# Fe&p}T$og͟Y v@@e3 |.|ue62b`fUpz>:\k.ۦ J`@3X+` 㓅)U 'Zgef? w/ōYuqvh͉97;=+g.9?͒"<39Ci$<׏t4 iFaY8dQ0iJ>/뉞,N!8*AGiԆg!GHPKHQإg^1KC|aŊN):Fg^J?_͇'/^a{'ǯ_ip04@w'ᗝ`| ?m?4m CSŶZՄo1`m>røW||e-ҏO?_ I7[G?iif*%+| @iU]\tيpP! ,-G\5욾] r)X|"X`cD1In@Etp#1q,2 cJqu^^_cDLꃑihʃ 2HԘUg"!N<mqcg0;%`΃[J{.;O#i`Uaz&CBm楩><,ލ3ȩl!|8(>c,J+0G^3ӝ__ٙb3EKٙEV*&G hI$gtd9 AhƢhrpX-"i'hpo].fFhNPƖ欤B/S;η \Xz5xfOa]H.MUVم}9u} FS+FLg>u{ڗrxbSLUCsKFD§kػ:wFڷ40p:% LScgqK#hLb1]}v꫁N/RmLkc͕XlsE6&_*šoM sj p-q|Of(&hFH o+[YoOf%pT=MaPRpOF(s["f7o>7'i2OJ2I_}z!GU/<+~>V &ϟ+xa*zpp>_FRN!h8F8NXݍJ4T! Q2BΊo9{wgw E,\H+ p$j ۖpIOwu:ܛZB7(͟({W^*JΏ;ޠ~QlT܆LgTG0@ݛ\feRXVC pcv9& K͕gXs9s3{8Ds,G؂[?y$"kY>69#I+ `yb bg0z{,)Rñ6~U3$5L)qaU_WU?|؃9)Q\9E͝: eٙSTv/բmKhֽHp{(UNR{חԚDo)xDQQh* mK\aSXFsv`Px%?֖ JSŜ`sDyb+o@l_eR8Es 9\n>]^M|2^cw&_ EQx^k%qp< :VZ9>PBPe cuMʪ4R7&2vUuzj[:@t`TozgVm]qFd_>dzm6G qSQ' y'5*{-}Yx3 Չ8+ʍ,6ޫwHWFhy̅'6/daIV[h:˘Y+XNI)LSHwV 4"tY ΩP%F2tS#;N/`d[~Lz##cCb L_ߖəkkNl0w,\`Y!^qYQ^Fa0pr!,1G.v9l1e=g=L0ۭlJI:S ThrvFBw4}4m6$%B$CW\ ]!ZzBc+N*fU:N0*BNp}9t%7z %'-|=^jv(۶HnAWv=U`Y []!\kR+t()#H<zyVh#m+Dٶf]= ]qͨLɺd J ]!ZNWSҕPL뫮j=Q(WNz@B^T}6c#slzB6p|ll<W]-eT?E?n58Y(ûx ]d2P^Hr+qx h./URtg-GITC6jo,Q'e`BT2l$]Ϋ7@1U&;7I{TK*,:,/33=gq2[pCٮ Uۗ- m1&Y­LT" +14m7ecmgn ^>}v :КMnҶlÅڂTGWzj[~+d J ]!Z-NWҒX&DWt R ]!CtJa;:B *BJBWP'GnRRҕ$ $DWJ ]!\uhm+DٶE\]= ])b,KNǺB6ؕ";+ ],[IIeɴy5eȄh M#\RiDkZ#nF(iPц\z[\Dk[J쎒,UVRMOn:S uNWRCWzî=Bt`}Ҷ{ۡ'J2[Е]O1J"U7BZͿʶŮ:zbL!vI$Ut(53KJD>g;F]!] NzەUЕ ]!ZlSJ.m9Uc$e$ܮ]%LoxXհ>jyVDVپ̀[ɦilrNn,4:YNk$\z-seB.64hn (-1}.J IvU:tpI!_(J*ڙKO#3bB Ăa@ݼݚ[EMB4*I&4(Y,i&HpBHfJvB#qtV)YWhVr ]!\uhtݝV}9te6z#ry˺2I_^E5T[oN'AX'Y[,Xf.._sg48޻}zl6L!`\xz^k$N.̟-+z}UnA;2 ͍+\sC}FBYK|o.]]D U>.o]W FBo֋&󸺇8TjRuߋ솃İoDO=U%XAPݳ@^Ev!5ؗUA{/['|?=`ծ֕XMrVâD M3xUlrp<*8LT4!~8/168A'kV(o]PN[m!,2h ,J``=0 Y5d1A2&K@658^!KQ!ӫꫯG#0.,tvJ\ ,MY; ?٪:"xhiNMWR04o*T,o~(.]!VK+ Z0…VZ EQZUɼ7W]!LjM]1^8d. IA@ g]M"\SB|)\CIJ$em|, I( /DI`W#\Mc*oH(\ȗB3'K5PIm OM<:L[M H1ҹҲ\)V~VڅzU FKѨM:f+ME>b 2F-QDAHec-ұIqY~еڃ:Og/~^=C-Qez ^Sv.΢3ܫ.+ڶުo7rBj1bLix5\Ybek ǔ"Z ?4+a L7| (a6Y6Zr8%"S8Bu^8caNnwKuH-x^~'MVIGb<ף5`XE9a {Ny1Mg@8_Htd T A =&~^{Z Bb[8QެΏcbX,r>^f[7dۢiRo1ΆӞ!pYɞ+10V'oLJn~]k~kkCu|vi+0`T7՛mH@F?͑G d]Mz}MB~uհ[?YMST^8Wo"rWu;g_[+#w׺lZ״Yu|KǓ갏ث0|J{4Vfʼnk U օ F.@z˷ϟ}s|^/߂aߟ}Su70:|w@ܪyՒjoY57b>[xԫ>UTFJ84tL9ga\zd՘ ^.Q+|/P}M+ŸB X3^tw-͍x=m_M"U<6 mߖ%{:PredHs~[9*xOGI70zX 1(fX:Zr)EԎsv3)93TA01ƞ'ai8G; 0y{2c`kBzsʶGIzhW;8Htf"Miיwo٫uZ_C3X5== 96%l·ؾ%9y.1gV;'Jq>-WVkOMutggܽشM usVB7vj$:~aszBK 6' _51˵YN#/okܞk{)R2>4J^6gt4J|[bLb "$H:gi rkЉ5|$ )܂x-E&Jcnl9.s1jBL >ФQZB맫)|x]rb9!SxUWxc3˧.c^1=>n^s\}[)fPFuꢒ3 WkA{9;{Gjё#hH.kP Q,R*`RS2rT[_ {=N^&yV."E-hG Ija&C! 
Y@4">V\GDgkFCe.nd=`ʮHz x|yx@3`T >]<\Ŕ?YҗoM9KG?65:= 7d?յKAޝs;FT'= Ht1,G#à(PϼJ=FW֭T'TLC*&ݳ%Xa;~ KAn'WR j?좕 VEaEQp8`;̹>tǎpZ{ԅf`&I҂ՎZt9y`Fd` CByeTJҢ.)ך+=52& ByYNf8ݍG<#BNr}/y3:yuج~Mi;:~dcʑU 7I)LYPĈ`.W2*oJqZǨ\ggu-Ý[ÕC{Ǿ~rvLUjz<'ruݏ4@:> 20 3b蜎%}5W-ڃˀ$|u]ZFF}f(X72>B dVڒB' 1x*zBʬӥ0c 0Zy*#(q ZnޓށV*=>KY?V>?(=PPDPO3?nǏK|;v [%$o%deW=tѸRpǂ{_੢*]' +BP)MC[87xX1BCɇ X}hh!] m! 1QRx!{LU2} ZzK%~}nzfWRڞœ ͑ttix`6 (-S{fLc]tj:GtE;3_ ]>82pe蓑xE++ eSY”˚힍F3O*mK*].:D1B3"- U:_p}sfFEsJ0}&vrӽ1Hdix/`,!R9X4Od9sSE( րS)~b/Js&j +jV|Ukrvcyśa50{kFd䠚ǫ5R7a:svLhCQ}6/fFUwd|J|93U_h j?Q|5M]r4ѐ&T?E!2dISS8QdI(,$M-qL5i8GEdӓSPg(}c jPH͖2*la38ƶqEڦTd!x,awחɟ4L6. WJ|g- (gٕfQS+xQ!UÉ`9;V)RRt2/ YtRٲ,Pd8ڇTHvZfRno͖H\O5bǡ[D-bovY QE:+u6  Ԗ|YӦdOQϿ$EY_Eev&RL%$f 03:+޷Z3?vq-|ͤPboI|dUB `0 !Gbl4{:EoY:Q2]..=l&CXw&؆n)NvG~097#HO:a:ZQ::}?*}W ZʣwJG˗otELX:-J+tF,uBw1E.[O[)j2HPJ6K %iò {Dn*J&<鲥A$mYدび`앚Q\"=nYQ잟b۸9dDdX!nZŇKw8s byj/Msl=$:1$dJ[Ȧ'gr OY::J6oTJ|$ֱb_Lbu"AKEhl,Ti]FrvCVtxI[/dMwз` %{s"cC4\рoRgɵ8]1uh9~:lͦP>eu~#[jȾ8#<+]f䄊t,@[EdS-( d ݭXOlSlX{㭯A ={zk|D 4]iL-⎌3/SJ7 :tM($je*H ^WojoS bA~Y`0`}o=:N$gH׬%sO/ZtP]mT53.]LFIjݷ>U};gbeٹnラ#0J LQ.Z8çrط>䭏7Mw"~0[&ӭC9kz>f~kVڤ[GNc 66,'/2hd -7A&cBV\:|LKҕZ)u2*K {4[n ^>~<M_gV~m?S9f^30K <Ä}lX'|/]Ge6J3Ǟ9L#KNTYڥ lI$2hs-qLJX4m']bYc*C :fsVfwL؅]En]di]]w*>u|eT*ۜͣ;=:>=dǤc3M~7gȎG.˙?OwO(vFnJ6ϷtS`b/˓m̟]j~mNMƼ'w{sT(1+mGg|;Ӹ3?Zx3"h ttAQћbC3_c̻ԁ`plcy>=h P\V!h~Q%MZC6)1 3LH)D~ǻU]"b;۔EasQ,obrv~f-_]\}Џrdx#ڹSTJR-Ɓ($#"Zu>A E8E0!Q}`$5Ah(BR ɢ6NJIDhsT-az $+'$]A3KJRs͖GH-r+> #t'xtU[?v+lwHOox)PQi1h_iC&J 6g4^Soz6|68]|I)J2rd"IC I}11[lڹX Iiby*\iPd8.Ol9>[v% -Sq)>FF2|_ia1$)7''Hj3e]tv_8T0kQEMܽelZA?O,>;m$#XȠH v6y+] _F˯7CZ8Uiu=늬~/yr䷅NwmY~HvMbxv8F|1zZӢ2Cԣ%QlElIIb=*[lٗCˋԊ/Ɉ-3hev O6=58t 2`u5K 'TmCۚHO_d@NQM,A<=HokE~gW_j7]_*]NR9M7՛-9;nƓ١Rr 7yɏh-z98sW#4Ͼ@?gqW\H JSogN8veWwXk4r^&0eW.z6-QuSC57WҾ[9 z1;KJg]R겧$`=Si*nt._OHL)Mٜo9`C4 L'Sie\36߷61m5 ӏ?'ρZwhV.;y>yx *38`BO5aѹ.ڭKТ=CZ"Z.}ii[5ڵP&+a:w -"/\ϴƾpڀbf?uE-#g=6[_ҔX!RjDzk9 \ι: BXV3؏$uB`c37Laxsk2iWR28&ga8ītU:crUrSQDT F'NH<0i53۰_k}gqnȞz~})ݥuVO-7@fusUx_HhZpg-ƯScZ+ԻJO2Y˂b1:}Ig*:#<{WoO+V>'_,Q~zCާܵC}߼~;/"ܭP*7};Bf2z&dZ f"'OKqY $R U=tQ:oU<]㳳S6nXXpz~Fh 7?\|ftxNx:'C<`|tF:lpY`o|NVPO&_˓BL7t}+B+ԮzOWitRCVtEhu"v#]AW'uf=+7 PGHWFtַ,ڼ]=p8x6V׌kx4]ʼn!hC-?_~~t0E3Zs}0/*u_Mehdf}iBkܮ4tbOӏeLv#cGt+B)؞!]9+]V7tEpuoztc+ogGtEu5+BktnOW_ ][0k}xcL=0]mˇЊ ܱW ~{Xh"6C{/^+B!];=+}o"c{"^:eGtE|j3+B~OW^Ѩ`8o5 䒧 Y] Y嬨t<*-Ky J(~!2lU1bg'7[8<3M $3iQp<UOYEڠ2//UFp8jlU8~9NĂ(t0tl8My+v#/D>,\աJ~X1)mq[fաJɨnw+ WTdoqXeCP¥´9 {>[YY &ҕg>+O 333f\ZJ!ꫡ+ngzj#^ҵ!\K6DkF]mrfIv{՝Lrғ'_yeqgJ?.GSIz:OGǹ5Ęd,I4EFNw9Z/b"Yr4!IJ2ClZhb"e)DU,C؎ZHTRsvuc?ѥWUq&2]R1dXIƜ` ϕ*\e< ^Gc0hѴNz/զƟ?CRRZ4je֪,.̵PRXu*)ޣOµ$=r _ B6fBoƬ:1ZsS\%Xq36[C׶pGs8JlI%Tg >$E hCW+-J`R oD< Sazm|kUZ\5ju6:1_0W*s{ DlpC3HڟSB*̜u!qDMk2܂}GA IPVzU▕X9P՚XA(IgXUƀ1Ɠz 7:1ܻ.0y+Z7xSL:&#q &v"RE (%) >@ I&bZV"!}#\Pa(vF$/. 
EdusGc8)C^"o*$9͑%tãls$N;%\ E˄Hv )0HH D7{Ƒe ad-!X$q0ebèW[)CR5n-K^-mfuԽUTYc'0r [J3,3 "G %`y$:kcIɉ FGN6Ny#4+c|a#;e(a$> ~TvFlV00YO+:0U lKx̦ ,DhI2܃6'21V͖Zށcu:U`%`CY{&IS'6$@8j\O@;M7׬nנLKf,K&@"TF =Zt%x@PÛ0(tH3- `(8z}!Rz)a:t^2tK;=zr89 U%e`ϡ?@p v3X->WRT˥f0/̎hYd>4@"H~dͱŸ6)$ G&H;(GtdqP`y`4^ 6əs6</'tXLG09prxF<ٰٝsWeh@`\*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T@߮ie@L#%7J @Yvh@ P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T@_%P 0}R r@W(f.y%PFiQ J i B%*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T4}R`aF h5y%P(*Q B%*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J T@B%*P J Trhׇkn8RW\_R:~&=D 0ok'Gpi0|KR0.p鋦g'UKY_*;ZF)8L;o]JۺjJlg^6+tꝷ2J ޼`6YI#yG(?bZI[޾{G4 DbriP imGteH֦./thyPrkvHW6KbD*YpMoNίeۡ+zæ(NVfLWk# q6(5rނ(]RJ]ehBWC*jQA4]`CW}ֈ]+@CCT3{DW W˾UFkٮ"]!]TLd;OWRЕHpTs7~I>` ~=Z_8`2 I=YȰ<- ݇dY8Nc1T|ni>VtT>WZStUQNbE+Jq|v"c d!-OѲp|0e;.C&AHt08N栃hrh7:E97/?{b4x_,Bp (1d)鱂pz,T|2J>|TMU(?齂{v[u_Y&/(e!Bo u2je:[A8|j|%)8ΪºX2WkVxZ&Ϊ9Y<32hN/ M;W -Hn7h·dW*-67tp ]e:]e ]#]Im[SB#eGz@ye]#۶?A.}vi:hU#MN!+db2\BW?`v(ZHW65uɺM9u>N&w>}dP|:+Isy eug<x:JM{ޅ-UR^Uh?<gOtX?㎈2#Eu%my%eu~s|s|]iAQ̣9;nO"/3z$-W">C4Y+F@Yfix$PXyuuO5e?&:㪣՟JuI ᡕ2ߚqOD-KQt !*chdQ 87dpOVRRdoM)zAyohuIy+YU"ViN]&|1 `5ܺditѼ`TeŴV`D4/~MF'۽$?xǫ|gɰy`~8v"LuEfǓ&e]oʚ:" *nnfg=.<6ViٍAn6iSoٞu ?F+ H]@9uQޞ=Tx~M4:so*CHY@ʢ7v[^y9RndK~O7Gx<8:w óІڑ@Gx4ó{<oU~]\t{kl!Mq1ޓ<=\Ie>Q+LvS%Ӌ4ԲeWթڳp&q3'z"?뿸JtfȫrK^4⚘v  ߉-/o95}7`ŮfTX:_J[!ZՔا?;XXdI*U(JG]!R, ǩ.ުR;.,¢ڹRgCk|Ubf4 Xp6]۶j,iSET|u4T/Fx4|#)'42iM•!)F9XL ]DBjgpn:.0~:&]ZQw[Q]oXl̡?Nt.!ۘ{=_aX_|Atl(1)ori7iD V\DE KlJ0 rӍsuS$} uDRp,cJQ.h,%0 1tJ( 1 JiN8lԚ3 ;OYuuL >zd9ܼl|8C*omFwW/Rن4 H9ҨeJŒaq3-;iK*Z7qGx&qDH=<xj̫2Ϣ1%LEO-s{t"ie`*y:al0C{)*8|dF2l C_IJ#ŏT%{$Dv€Ib1fQ٨_/j^4-6ٛ]ԭ=2" ; uAKOwT)3\x.ULQmJsϘ4JE'KNUz)&^'{`(?󴨖g~].w7.,ٓ/ٷttV'S}SʝO .y_8JhIx@#P!%_M,+ӑB l9@)Q\QxX!\Dx ٬) s8ջ;S=NXWEɜT9JXdji`XRcB9*k[2 z_,ip>゙Ela\|Ba`(B̧yeC>_mӧ{>˧{=#lƝ\M wmI_!;r?_CɞApgSBRvW=|aMdžiif8]SU}UD$}9A*kLH+o*96km9 9Twn9!<K!tt> vҸYN2#ug`d'<۹vFٌRW|2CwFꪶ' E!QI(Աluan$atLR9\"kP 9&VFx:u1Z-=VC$3%dGBDr[dRN(3|999Ta/!!Y |㖒PE/Rsf9 1K *,e&WiEՂߔ2&5~MCw!TzJc-yelf97X) d|V Nm4Òrkj]c?$2$Ch!8 H|0 R4YCHC`,GAxmllz $3 ؅Jd`P|p~|毕 ,QF yh28m]rx::Jo"ǸwRZnc10e [Kk'5^"L&"j,gDx44޴}6 x-7/hS-+%A1M2k1f"h>('"jؾ̓\:Ea4 c 1 &78PYSrrX-*G"1DՁ4%iҸ#z5{ϣdV{e#hIMY$߇-GH y3^G 31eRi cN;!UbQsG4Щr{CՐǦI2xe B"fǵ<5@\8PIc`AW}>6wsm!IXßZКɶF&m2#_7tAk0ŌS$_2 v6XʊXNyʙSK'ʙ^[)gklw:\=z MX/y; /if@˗ 37Vd95ltc D5eĒ'O; ;M6%H6PH9S%ʗt9{bKЬBONv$~N*s']0n|አM1u0fdB)M B3rCTB$K3 HW,6x0vh0mwo,^sa<4XNPdkME|*;*z7Lʭ9HӨrAuyPע̋J]Fo{R= d+8nWYjG'qcU(4]s>93x @d *8XJ"=:0Ub=_tU(W䩘2Zu2xQ !t\Aɂc\+52g툳:5+@\tǤG(SbcjQuY77 [vuzOf P|q0Q@xWi*o?aήՆ~y:R"Uf١2/unx3qz A&G& 9Lى T9iS>>L d)mw{gAڠ 0{c.@G\(kg>-W󄼼u|gjPKC8)I5as=FCKmVe yQܦ U*[de286J`ʹlǪ}oũAJݞ"&4r<?.ۇm貇 n1?]\sm5crOc%F%uM{ ],X q)-ZNz^4*8yQ1 m,)ވ>AtIKO"|lw⬯`t2$s־-I)Zjy1K-[ 4~pc֜5TVn'MN7Y YoP򃫟P u"+iNBLX TʇTb(cRn*2,w%0pR!8\d3˜τw*hngXV灄^}@tT%0pM0V1`2fKbHZݛ`S}I!Еblv/xD/ӯȁF4]V!v?O:ϴlEjS,GDdكh8m$Ear6I#dd8Yiش*Z !tw ghe[bi_*KH8zk|4qTL}MӼ 5I4.NJ:N҉+}(L"ӽ+tVCBi-N`ǣUrj9t"*kg WZ(~AX&~:+lI7{?)ooIK0l- yz}Lg(oo~I$t羃.i'ϕ5G4=/>5ۧ*,~;O~R7e]\Г?>!!㱉]_zqYf{QG `1LҮuT2 cY'B5AɮT1cL!bjV(kZV;k9'K9nTTyv^jyBq.V;kZV;kZV;TLV;9ZV;kZV;iZiIi[@^g޸>Ϻw-!=XPs b.q]qP*7u:9ѧ ªlH2^{t cM*3H%)aRƠQSSVP'@Z} *CʒpϒpTʄZx[E#}땋iLǿ]<棝d2[qa ]YŘw~S3 m^ҫVA`…̹+"cEMJ '>*qN'ǹvs$xzzFas!T92_Y[Գ~:_ս~ 6EoXn+eH%]p]˲<ŏ . E>ެ;L>&a@iYtgWζ gjPruh%? 
AHd:E4Lcb!F&A٤T$Z K2r:jR)ie6XieCp q3ˌgw*hV#gOr+dnic-Ogoó{ *p,21`be"xPL gH tus'ӁkLT^~E1A !1B*i-pcsk[PiMjivaxq1P󶻎q3xb'dBTE5e4*( ωRYw99asҖeEUdd *O, IF fYhI!Ӊk9W3TmdFndTjq_,t6,|P,ÅZq'Q[gٸJܧڣ fs,>SǢ>8"|@=l4C94wN6I@d)WJ>&JgiaYB(%]qr VlY^ߔɣjQ]h68?]V;&o}iU=4ˎow=_fg$9emJ182:\{C\ rT0* .q 7DxD(d " nJT=u F擇f7{\_o3Ete[݆9{׭RfOxRYE,Xn} 2K ^̑[ѕjShSxlгz<5 dXUcQwGsC2ښbTA8*|FA䑑X%лL*2$"(R1 S9T4G/SI}5r?B ubP2Rj[vðUDDVҗjxSЍeqR"V2%BV]G$Mg)mBRqƌ1eTdFZq54@h2ffsQ&ÅɀLoruO8G'_?|"f/O俳?X+$/'og~9竅(]{'ǿ.@^բ/E??{2\ŷ_se/.\ſs\'WCOQ \}ݟGԮm?S.p9K2UNj/u{?ڥqt>~$6A8Q(n.Ow_?|խW໛G~CsOc"L7]N x[[t@_ng G^^# B?CBy 0Q"tjWKHrx2)ϋfs9A %tycP=M~>dѫ(nڵ\_곿R]dsi~\ &}guzo\:^%A1?=(0}xz饛6 }]3߫[7H4)R[: 8$^J7G<\"?%awN09toyƻ#z+ñ$- WYp^^i{^NFc{'1.qw`ef1Gb;u]1k΀BD G׻|s47>7 9S)XPzX 8څ<$QZh^*فEUThBx3neRb,Ko%4МTY0`ʒ"X k5] y8"x~  C}]ۊ?mΫ~cZuYA%?res |5 RQUlJ:YZКRW @FAN‡4eN#)u!UUQQ*K>%.#" 8[o*iLB9C\1]9{~+Rahs7?\7WvWlxVn]NWV6UYsg"KA,YP*#I$ŝQe1GUQ610;i:y[DҞj01QY@үp78MXIXBflmdLk$Y->`;!h4QlD ŘAA',栻{F t YJ:H}9z΄"EG&3ǡp!: Z͸?p\@KeV8eH90DT!#E5 Db[T[ZgnʘvLAAfB9,qPgUaPx+c;$c;Ci´p+A;܁D0Ķ4NpkCο8Ss8Q6<2-Z#jwp!l4 KC0.YTHCd \ɣbԈx&N[Fs6mW3- O4W|lSF$F*b1W4 #:R X alپt;"E )*:&1'# ͸J&x0 A ^Ϋ{@0-AC(X@-I+Sr5 Hǝ{gBo\d::?>3Y/|.|T:p6J0/&ߪp|98kN#pn‹ d0L$cңFb<gr -V<xa^y;s9?>t|o㗧~7ǧ`؞ۗF`, u$Iu#߀[]wqS]Cbt|jW&_boh;nDI՗=3*cD"ꑂ-5Pq']I]|'?A/˓;UUV5QE.}BX3*-zE[pvkbI"v`Y2CI&I0aC3jYd(l0Qyy!=m QL ).ȄRcKWSm( qBhyDG(6H\g 5+M;O#gEMYCԝBm楥>>,ƙ|HX6cwE1rQyG^3S?#t% /CKx:fD'k"ަDCECyI%V*&G hI$g{,R11^y Ht4D{S0`*hM,½u)ŽМ ι؅^ޥ N.̽ xjO º\Nyxڭ矄sLr];E0Z^}${U`@ɍ۫K'p?l瑧$ P9  OAsK:F͚40Q LScgqK#hLb1mfN?=ěߴ282K۷-y,|Cl=MyZ H\ ttVL~YS65y=\ykFW~f~!|5 w7VR?.FTJf<EN@Y%.,XșBxQ =\1BZnXl0-Ъ"o[AI:ܛZB7L˞QTEJʇUw>t:Imj:^KU',+ʹpcvݿNIM@+;}+5w7:cʴ*ߋsH1pЍ eYGZr; r?d떹[>@W}677Lu}ϳ^!;eHc_j! S,#i9C6[I0t}~ؐu>.AP$+da =_rn~]{5Oe)wrnAZ~Y*i^ܘWZVӐ6|4L6eX9Qw et\J[HP)ƚxiZ>^%Grt>5`NQȕ[I FR3Asi9T-,w;U ]Kպ׈m)U^4D`H#H,F4D$ fVbn*&cF,rH# be}H5TH0%)c#g*Ck\*>W,apIa u2 ,Xf#} SNJS+hQ6$j(e. 
oeXaRUZĨ8*@`#Um۵*[<\w&8M'aN ERni#aWPXn->w$PT)/e$W2:?⬉b18Ŝ9s˭#ĂgeP-}Z@1\#)úQϘ8P=d:F0)j$l#'-ǧk{חA2kLm>B ߚzcaWzW]=Mx ^n&hRu$gL<s!DsMږ}+*67oRvß]kp[}dzHi4jb w MT~DQW~ۻ/ n§iṢ<,/^ oR"P˛&i]'tY@9Yj:RjsfecvaLn4٭lm$SLfGjAl X `$r ed!v~[)p-8!_Dh` shYuP, "JckFvbEa ý ݫ1hjzn 4# +tTf"FL PxJbFjvljG(R{GcW?Ɠv70}]-by zgJhϸR!8NSUHj$ s#e^ $Q Fp/LoC6Rd6#Qq4K/qd8M2#G(BdȻ6P3zV,rBp7OSi;[T1Iw@ՏjPw}lRL$9>RV&X"-:b^ú0# lxQ<ܷVi F@XG,񂧢0NXOX㐈J'=cA % "HFx0&{C沕Ƥ~X6..52фᐌ*idױw  KM($Uۏ.q0ҜLVqb i#H"H°`fS)@2j׫+F)R+/\h#!8L 0UG${$&22dB(.Π#?Ż͗ûףK3}4RrRפ݅@g\>sW?MznI˿g ƣJnG?kop㪈Hr07)k42Q_8/WcX\ˏ&堛:L~}J\vSjhh~2GNfYsN.lqߗcؿJ b 9_ .MʪC`J,虽tYצz@NOx6e!ۛ@!P'Ye+bL)'&iJ cڅ'cL/z+M'E/#Z߀bxcօM,R̺J/U61u$@4Bn^ϫiMO~e?m4OrAc[1.͟VG?ˎ+ytO촖2B$du(DHJ&XG$YeM"()0]J+.} J%.cFg39rD^mg2{pV#y>挓*wc4r:k-[ E& 1 d =;z|}S釣-}ԊȍKk"k,@q[IAeOmvmCOv·.i{?.Lxx{u>FQ~Ws6#J]z_n|Ս:{mO/uZz@9w|ZO5T]GQ%W֪K%!o\]wߟfK5[Z<`lUGYUg'e[)#=ڙs `r&$YaS.,0+&8yxIE\`bO!wl?mCY4A}H31g:`#5SmMzFz*#풧3(GeB7N`u0'=sM@GP z &c?=璭a nuݳjA]l0I9Mɒcx_n*0Ɛe>Wi0R[]ԉj%<<ԻAV7b>-i]N8:P2@L\skJ piw'W'UkhXs %kr-rxG2 gu0Qy؝18k^t7~hg_7E/_\].T;ds&\)`Ĕ d2IYqAGHCn}7Xy݃b^f_+o \>x >]!mW-s Grx~t;d !Bt[[{Fcm3~xiD4ζNL*oS).62vb9ǣ˻FO^ o6<0:`g?d۬-J "^:-i!u8"'u)I<(S~&wMՃJa6 j& 65ax}A߿~?w?=#`{_}:I%ȧ/'-@',OOyWS|%m.z[kT2Cb1odHwnnN;Mj" jXRT@ *)LE|vd_nD@J!,3I[26V$ 3#|_:tK *0}FN(\40Ԍ!9/,t>[8-XFbg٦5n-Ύ.օ YCK7[b2iYl*'.>g_pUSU#>RN0wEy%$t5PR`e8=}uvBd{(HQpLD.CmQ9IsSTi/B8hrb_򻛒$'_E;.5/Zٳ{(|n*YŰP9lPIgAIB 29ӥV:Ɯ=v¼3YE9=M0mr|VUAh4oX'7q#a.&d@!hTP2w8Rnyg Nfڊ!}SO`/9$=iʚg_~I5lϫC'U&WfY $:k>,PƋ z0jHB3MSē/=ġ[WXCŶu~ӵZ6̤s|TU} _ήry{>'u}ss;M&Ac%mu"ݻ)~߅d@8#!XjK,wH۹~~sI\/q*HY3thjz:$gFP?xVJLS,sjq{ܡZ[N;PҽisVdD[ ,+Hq'A[Kw DϤ6_(e$ %!(a*eTjZ'0DBgL` <هrWx 6/aF;K6)!RkՃOZx>G`('BEv&HBb+M:Y-$JaY@]tc";3/@}v{7ܛvSa;Bә49mQI`nTAQUWj*·m#`K%%?}n|%uR20:eli "%/e/sʚ{+--UÒ i]C;[2H_8@0gHtlUi"RR$J5`:a&er͆Ll`4LJl!0gGƥrIFȽ"$2DwTPؚUe:#c+#jLo;a sWtyI0ELWQ.BHDT*cI ATJ9Q\6B}ɞBH`__bH y`t}ų|r4t!S[BѪf]2KT\}6 :%\愉12f'Ё $Hїbɘ}Z`w!vAK/o&62 ]y*́T){ˌ#|:Ls]:|T5hOzHPU9@9 WRk,t{lVr;}/c2j)H"jkL)_*RxEt\+BFKt'KRW8~&=C{єF4۲rZ~, ۳m[w[=;Sԛw9]f3.TZtQFZԉrP)t`!钕\y*ɐ*R}e9CH*1m=6ffoEimSsT \h-^эm-f7[}n3$~L=dlk={tt tI4GGB> /i_ҤR#}$V T@V <8U)K+ +ChrɋцL'%X\#^:Xu+")xp;DRk.YQ&lA=wҏ2Nl9Cu)@o}^M2)ZR> B/?\zծ+tQx۱iça(q#}C'p2mPpcPՙb+n8J^|Fo 5`,{Gŕj_HzHFU>D._:]苬+׻w?e(QJ[9 Dj\,q^gBT6\B*˜IaPIc"  p3r\{D㣏I@e#yl$2C'a]<[oCS}hɴFџkEFxkhZ-f\!$j,3 18'9\ EQϋZ7T$52J1hښ"uI{ST)Ƅܢ[؂ނop8Iˊn[+š 7>؊nF"FI_ Ӏ ?n A6 !mMbE0eJJst(tJe,bY\"qU8:BXmZ@"A *Fi& sD M+rkʱ⋉a@w$1fyl|"[ޥJ) @&D(.F<:⒀ * 6=DFtT$70DEUy#84ŸZNѾR7x_xrg#9YM.$LP c;MB?)DҥOWg=b ,Ŵ㖨MIY]=epAc$*0-λHIXeH<7}8&}ĨEWSN !h0<ӍI,dKtu!@<Qτ۠>4NC+XL?n0"ҊWi"NIS%CV1e҄@!%)1bDZ xBEDDNM򠀥3.8!((HVQǠ0"gN̓SMg\YLKnPYŊ+ɽcP Pɉ:jY@xMxgZN50\ Ŵx( - aF754ijv\9 l$xOُZ{t XiM/1,.gr 8K+R*QǀyYyFs ,% |OMW|Q}? 
?ۅ]OxIbHT5]#6򜍆~=euwY,!ュwbj#jo 7O/-ԧm =;dxbМ #Gt46ɈG1p|Tn]n*s"Opx> S]v\?>3\|f)`3(;{;;xw;?XX<ފ|(Uzu6R_Q: 9!Q\SnC٘B"*BQ )\rz;|75&sQda E_~6Cy==?ǿzI.[mŲ#;>oN[;| SB7ybݶs .3G_ncCUrX 7?Giu?#:_o0Sʽ^+z^{r?RZQ8u2,]|(QdktZc49VSô>]jTo۾UC lJ+u n4ȾߗVhq!50`.x7XBqm$ aR2͜57:Eb xC =I\w\_f;7N[B(ʬPW*JeV*Y2Tfڂ91r T*7P@rUn T*7P,oY xrՀrUn T*7P@rUnZ/rUn T*7P@rUԮ@rUnrUn TqrUn T*7P@(mk ^hvUR7BOR*ʊcnh\Զq.*NY͂K[3~#gwYT?JR>YI6(%Z7j+ pm1L.BcL V{gK?trf>KlؒS|0ڥ%pg֞m+-g*ڮ8pTűzG)޾wtz>.rK5.QP‚7aZ,q!yq`Hu%bA|B'FIAS.2TD[(8[6E8scc I$2A3⩣FksN"<҉(Y]Ep~VfOIxOn}Lܓ룩8ÁpRVRtq=y5gkt ժn{W {BW8kM*7W:~iک?|w8nMÈ3a,20js?&cT{말/ 4mvߣ 8XɎOǨp1aH .&1mnN-ϳK|Q-T%Oh&mti Fϋ&Y3[l;T%o^z;{TN1PGe6n'O=*Ǐ{/n v2N^h.[+ejfݘFf~I~Z1"F= B4xu@5I8߀LrnόU5J+ZQn+e2ׄ,h 2\JB %~ rHį_:|{NYKRs1P;5:{˞Vtoxٳ /;J+u^,%`xaéG08[7C(B552Mj43`k4\hxGa#A@r-}TERkͼG9Q ˞;:5ŏgCU_CgٻevM*yv\Rп,]snsܶ[=>D1O9$][7غnL[o`y՛+Snb[΁n{; z^hZr]M}p1Mt~B̳|M N-\eUs6Ylgymv=|u;^v[J\s+o c5??[k爱'`BTH$Km1r(32)k#H+Ep~ڙ䆣amNm=(3dAؤ#V+,q&"1U6QFbCsbt{5k]ڢPۢKKvY^ӖSn1aoiƦn<8/W Z {Å\SP"Vj*H$p‹TY;5PG'jdg TIc 1)) jƼ|ه4>P1@#@~*jR 4Ntcehw*GFcdZ_:8wU3Oo)0FOޟl<<[b~rve+5d<ͬ -ːb!qjSA&@d\ qSxIгz*<4qhw=5{ǂ!F&YOdrNeBJjB|TtpԔ.?xޭӌY˿~>:mc.IՊ8;|xv_)ɧ\\{/ߞ_fM^ų~3}$|i.,EGmA| ݀y_ .Ǟ/[8~~zs-ukߏ.Ə߶ bsO^}^|2}ośWy}lEDޢ'_~A^ yW~9|?B~wI!4 6M`yM?g]c\[iUՆvM[ȿyjb`zSr#zS1^N9pz}Y=tz<8INgvij8Ճ_RAMbgʚ8_X;n Y9fuX΋:I@@JfU7 AA6fXnteWef嗕"'ޙ]_|A<qJ.{||pdxO0~aUP.z )RN!7bK }JWggA]&+v0Ƨ bVָƗ!(Gu^(!BN./q{iיUǣ"i0*e˄  ?zh )7  p}z?htB^sb~P>cbq8] :xǿ=N_ B !/)@zȭBT Jz$Uk=nW=vY^r \^Œ\|ɱU퓔[V;iU%bﻒ5}yVd4q ]' [nMg p6_,[7\[&Ne1FHC? 񿭝ZMd5SSgx04_='"pw &37CnƳ":n1n!D8l# ̨w >*bP:%b#,9۝,U{A Er4#p$?BE\xOFRp2uLF%74ч6]A)EԒ)@ ~RUZĨ8*Ui,ɵ0a~A1 ԍƮqK] 뵜Uʪgc+qpU:D&8V}K+V#9XfDI."vy:qGtiZ:D@E.d88C Ja1*0pK !!Gʍ^y`OFtGy.'C KFf&*=gcM$yn7C QM$; 6qBm,B#iiw2W2!L Z% 0)Ӂ(a20HTkK1%@2'y՟1j1(jk2jy?lnWa좻=91'Db;ņ>;_yC:,RhK! \|&h)ݳg KJ&X+)aueg0"T4@.$qtR2*} fhCJb #5(4vA)ıH1W ]oYh 1rfuv"~@[- gͽ !l XRkmNW9% :o m%j9d)Z;B(ڠ yB6xTc&},5WEr9)c 0 IzE!   b` BsYWC㝌HZCی[ xw8:w35emhUlg w'%#ʖ`[F^kcYyD-/є*,IdQ! ULp 'Q#@8594ڴZފeDs΃`[2"7Ra7@(Q\Av@ll._öŶ}HwD@R@UtLbNF  qL(`PVAyi=q1a(:.밿HMx`!a 59k"%X!Ya%Htt,߹F8A3Z:i 95wXA(!jCj[b5b'`NZ09܋ {n7U;d|1sῖ| N s=_w)R0-ru'0؉94Ezua7 +AF%yr<اZTV[х)"3;RL]L&.{)^貾6[  85rTvr%tN 黣<<>J|O S ×.3/f f_.:8؟/TkzCϭs xkە=/d~?0>ZRoT\y=Mku]|zRpl$FesbN0'R% 0iGvxbkЪ}UÐhreVyR_((ir6ѓŘSGQW+|>$⇙D>]zW,'xp1riPZ:]Pzf=Pq?]I]}g_)6y-W?FSDV;Y( 63j3Zَ:1\v&!؁Q fh 1F$$P=LǫASfHNzjLYzh}BF ).ȄRc[WS qBhyDG(\svi'ږiAeMOw`KcxU]3oSw=JyTE$X587GZ־ {*2'z/u ~g&#LEgS8/E0h<(CGK|E-t:E~zA}q8`PY ߙJ?U] y:%9gE?Q ?x1J# WV~J)DNeAqGQ%`?V(E)C 1f+ZHh=j_hQhEPX<`¹0i BSC܁[l4 +ЈsV6ha[[al9cZ/!vxM?rӾLJ+LJ@LJ)RB;JJ+*Q[evY]N]@uR!u#쌺r㻢Z!UPXg]DuS.A2wG]%rĒ&jm >^]%*U^فC*ΨDVFڮDt%+XY:X'\|*Ԭ\ֹB>VW FJ 8=@+hZa68wxaITR9,{KL%i +jV%Pqմ\]*XNB%u zP KTW !p FAǔ'-b&x/E^5H%Fr.GХaCד;"1̅+dP, "JcG- 9 FwH7r DޙVG$*t7K{P#nWW!!ğY]=\̦èyè]?@]N]m%wI]QW\wE]%j9kJT ѩRW`vF]%r%uղ w%+M“z7Y~9UQe Χq0'OΛٵJLp\"ͼ-LU[Km+w4귽⻏g`J\6I?OErzKy.R0{U(& wK^jQJ' 8XWcCbqrn][!pלztg&=5g<2`K.-2r5Y 'A(+(EǸ.gq4댖'xUzGZVKF0L`Ɓ竓(eb=(/޿eE^OXwFwWgGIaR:WIyRB$1*"w 0WxeAUx[)+vF_ %^[\mZR3_WAql|LS˪ s#WWcR, Co}sdK+{I?GxXӣi,4UUM|\ў>{џΣha]=kylѾKo_26Z AW/ˇ[7g0*滳n+<6%MZȡbM } ƴV{ħV>CC;qԏ=c+$ZmM~F ~)qOĵ¢7>&|yy\ym}[ jDNavaCɆȯv\J.D[>jeU._? 
Aȷow_YəoU뽙1^G{\Otݷvz)YUe%*`b(Վk<=s(r2>>_1dqSaΨofoyx>fa"\喟OuAX,!ts,o1rbD[l6mPo$(2ޯA+:"zvb^n|kmF s?6n~q} {S <䕺фvk_t)^VkN;/\"GZ\v V7+i}#MDݴ}_D$VK|sm5z͊H 2ʱA 99lr,nN~_&@7&ywyt:+F6[ϳMUM-~hSd)OfՌ ό`k\5$sUS‚Sĕ@~;w3΍oJIJ}f{Xa()\CP%=wJ7.>AJAp;WM6qT)JӔ&uvJ4=Q΄pbp`!92#8Nm8"1*R3+yxա;U|Ce 掫rn+ps?UɮZ * NWpnvk!Lqe/:A\q#\AM&\/jj;Jq NW'\A?8y@ NW xƫcՙRZ=Ԛ;ǎiha L7͜stSy b{WM?Yek\5玫-:E\O-&8P7jr%+zj*MXpu"#\UkUSf_ 67{={޿Q^5(l̑q5N bTcAׂs?.g^{nW Gˁ3?See7l],;IƘ -'wxr(#["l#r ڎJ& +jާ%vSS, ~pR7jjWM_"@]Y35[FU~v 0wX7\ҽ`u~n*9,>AL{UG`mY{_qjWAQ*Sĕ(t&Yr7Z?U|MeX׃HO'9VoJ=Z\W^+f:U,\v+WMQ NWFkE#\Av\5+5ڎhJZSĕWM0np{j0쪩^pu",pt=i箚ʰ+g3=]A(Wu/Zcq6W'+6x u?zzs{iJaqZVfv sob ;Wnog(^sG5bhj8d`[`z&|/jjyJb8E\-&WwCSIJ4Q_ h8B|cm5,:1#\5>t&gQ+J㪩ܚ\pu:2d+U\5+6W'+K{Wl?U|/jj%WP)Kvu"pZ3wj Z掫‚SfN._gdiU*tsԨm??dL- '15N_15$b 7ʲBe}4YsQinJlȵoZ3*E>]G#\ pJ7[V;J NWlNT3{լ):7oAVw>=k+=ݸ`b gM]"C{De1MUB઩}ҩW'+aO3xu`gm7jr]7svM=--z-"IQ^y}9qy(Z96瑮q*W{\bzrWu+A+ G:sJq NW{9uÅG=-#z?w\53;\x]OTjrm7`Sdj*gv@˂Ķ+\C#\l? [/Wp?c6h|./m9Ϗg^\^Dk M[?!x7ON~:_?]pAnM^y=?~mzFsoYz_~lW9tY?5"_|@w/WwW7p=[a&6~n-֐".孯c}WF~ڧ/.aқxjn.sn={r}k/pGK??7Qu-mQyq?QE }@t0蛧syc> =Gh6#? {z{[(uU/ozVϷq|FCa9Zk\`Kt9I۬ U,n5g-|ۋ݃$?܃}M-]~Vh uuPor)J0$U)>X騜%(IpluCI^LtVI^ u֪XbqR1X3Xh*?tF:\C{,RA8܈=]4qPIՒHS-I)ZcPTH(k-M%6hF?h;Fq}2S-6XKc9_R]4 和e?8T*$kg2)W])VIR$v0` kx3reM CIHSj!ҖgzgM~FR04F CCG6 7*ԡGJu8QBB}{*6YtȬ=`- ^@IIqH$s<>YN i 6%XS$G]`m5X j`)suvqYлQcCcA?ȷJ[m)*NTCH PU,yL CU.8p*I@UhX'NRm~чdԠ*zEV1Z3^8bRt= BAtTX5JR2Ud4>#KlhH9X E!SNC1Ȣ"x=+Z9۱3 ~R7D)R!˃:N\ZWbA[,(5ks8)^b,TPs1c4G< Z5h+R(]&T`=@TH>b=h)Pk2#HIJ\P-V̯+^P@." wks !$*VO:LD/F,`VMs.BD!2.XrAS@oI56!ץEkT[]w*;cg7t0P5a7^-y]\bNsL-:@Ձ}HU^0/\:-?!8=෶K:!"R]H2U[SWz/-7|7_Sud 8dl2?xXlC7e>>o4H{Ew{3&/+kzی `0BU3WUVkJ ^ \#{ϛ~Yvjn ura^ y6ܵ5a-u Ab &@_'Lhy_~M~ۿMN,3~ܣ;;&\Hwk{k`[F\?1ME{nV G0a6\`'s/̎;\9ц%•gK;"j{=L2WZwlz^g_bZ?/\Zg3vWM9Ύ`2|4pun,pś;\5+[ ^\ .9w#f6fUcfWR0Ip }y6"8gcfWJC%•⎻1]&pbfcfWʭ\WF2vO0c}OacNjTSM1k{YkUXk=tk/~hNp^͆l: ՇpIX&\nQ+ǶWXg;=j "R\CJ]vM!_Lwx]1I޿jmy#mRlYx;Ǎ Nũw88~{ӿ_&\\Ͽa6Gˋ7x!|:ۼכ sCkC>?zv\7`ww'O:9^z?ҩ1xp/7ҡ\CicI[U]8y߃_nﳋm~6_|)gW6bvzTv ߇Cz\*Y}>[w||NO_?@mKQcއ7 o%/\]gR )U Wkui)> c]Rmmw kQkG.x"( Fd29\G{%wߟ_ W7ܳyvwwn99\!]>&./{ݢ,s{0\[)a.>\ܕs2TCUzi?Hcewmn`CJϋ>\ЧkZ;7Zzשּׁ/}tΘFY8[U\:OD}ޮ?.pYQ>lF|_nlu47_|o7#S[]7 scҥ{&8iu[ af >.r}a'LTO2ǴO z< \;fmeix NZ̓Eg-N؟mSU "`:bqi\+VrAc"R;/=*Uk[ɴ[4Fej%k2o&my|!%/(=2(,< <1ö*}6!PL*Ugב\D. 
ٜO5AuBrXzg??x]b >i3.S;&Ŵ,Y#&ҫN'p߭AݹmO){`NKY?;,0CNpJ*y d]Ĉӱ6DXT(QD"5T~Jщ ?)9.]aE0eO>A)Ԛ )uH҇Y:4doF)KQ"0JH7[3W7SƤnXKroN5> ~$Lrx?ط\PIy{T)P:e'~:ȶITi2DN` ϔh={.[g2i1k ʅl91 %ak1 D1Ujna@B%3c0)Z̗[~'S|e >Ku[fn ~'[ڮD1g>]7\|vj$Ue^vF;`Ɛd˟{n$,l}6;ٻխr0-όy-B:D/9m˙}Q0y-dP9{R ]w1]LBJtN)>cNW e 6Oѕ1%uFI1%>TVN=l"mMa~s|2}1jޞavLw] ]YK 51K#ޟ1 7NdrUdʹZxɳd=w%hph乮Ƃ7sn<+^^uIZV 0&s{g}'Ūneӫ[v> ,)=Afs_]&̺4n(>!2G d\ tU- ;J}%{E|?ʢɈ)e&Sb٬RhMCl ʬȔQ[J(*sq_~1J.*en9OW>*"iw%1^Ul2G)KdjU*TCj^0o"7շ (K":RK7Qh-sihPQۓ>tq:'ms@;ۮ["~ri]]z++4xt/XvBk:t5͂\ nm;#76AXwmI_!e7"8\aQ-L IQ_̐CRTKLUUwwvr(*o!/+IJ3dP*Kp6e[ʐRT(2*Sa< 11qz `zw4(zNDwJTid,Ffd,b+Xhz,|T,\Rm~5ûIjn*뻝]Bkr7 *7M>2w%Qδ0h`r57F+=Яx@*hWK1Q%9Y4\9SNJk-(V1C FQ;xŚe>CzQڧz7B>FCl4w>4OYlRܞܞ6 i @6:,N#WM2TZxA&,P%Q!r=0 CH%9T+눤Ip,cJQ.hLDg ZBBC"ƜRfZs&Q8Qy=hΒԅ҉EE0}w#~#8˯b|8,q sl,ݪ`C/:g|Q-§f_r$X1(U4LȇQ A"č )0#sЄWd6[UDO?rc{ p sh6;qk$7%7[um՝Sp *Vs3}Q掤mG9CWhͻxٯ;Uǚ>}n.>ޅO$#A;REbM}s:vQW4_ \3if'?N&gD+=r8)~lg39η|<Ϟ 3WoM5&JͽclC<ޓ.`mLPIjO.RuLPj3{o8 턷1ೆMLs<:#wf Xwqo)^Y13%<fWM;s3 ]oom!)\HiX>yGE}ڏ1Hv3Ʃ9=/E=_^P[_wp @HpT12E MJsϘ:S^Rϼ1"8Dn.zC:f0??ۛ9x' /=V7 ?OL ?r9Y4'%VM(8[9J8pUC3}!BRrMD,$m7*z@^ 0A2FD 1>p*{[ղK/\<>m.Ϭ솮Ux%a"r-iMY:sY+H~u2 _n]5 l96jVWKv.z^ir-w7 |oUxo-{ޕΆ&\پݲ,g^m9mjNX sitp0n+i" 87IgU/\a1deNyi˵)I"i}Jhx3ЃblQaqt>Ug۸=<*w߅Y˔ >iė0OCFn~k|kVVĥFpXKmT>٠;J%QAQY!S&2F"ҁoMpol%LZ`?'EKb 1u['I_Dx)rH`}DFz<% $Q) *8 j]*dX Nj5J5mpsIxx0s2.ɘK+7 ~Q\ՌQ.ǡ_Ilp$$SQ + )B "J';ZΆ5.- ]I:$26Zx&s73ۉG,`RdǍ.R\٣AxDCd B3rr%z}Otk=s\te%)`ZF DIǩ3h t4<) $4:1 HAͶedW10): <0 (.hXk <6)U{jm_ApN; qH981YK fA7)P tpggao?ۜ hsT\gu فkMUqIPCͩJzAz rNO D9ƽӉG8@TYNA:Q1Ȩ 1a^yszLOdtuaƉ_?k )Exk/:Ql>iqp>1up s]rLaFEo^gl[궳aq$x+SCmR^$qvTTAIm6hS 5hd ArV,@8pl0xz#[".ʡ:mnr5|In :r||:UE7Gu .gvpf,k 92c,ϵm6pELsM9'JF菃kOԶ4ͳ{ T6w7=ޮ*gQh%18'r{5+dOoOx9#tGNaX9*|@103dsq2].&zv5t8_pQY?!FxVfrY煅)ݐ}LC||H`@SԚN7 A ;_?~8z?݇#Tlч9\H' 5i%-54ЦSŸJ|qorڠZ ?8~75{#GrfUR435`U>od9#Z 2wSV2QTXY,F6nk\ާT;;n.w < xzAY_M>uޟ:Ʌ 'J$ؠ^W`jUx*W|)w^.K'C7c,1l>K֬D|*P4OɳPJdN©tG-!V^xo}fV6ڧ7L;ܯ6y_5|4ebo"Mھi*]79jEq&JbTPh.VKy3!H 0lvVE~{c^(wٛ{N\~|y]_ZDһ)|1'O燛$0uTZU7oDm<ޏ綒UY |rI>$6 JMƗ2l%JzuXxCƦ;A0Kȱ!h*bBym ѱ0QSсSV 2z̩$K;I*EjO)Q"5զPXXBfbFIZY%jIړ('W01EX6i0]Kϛ ЋUd *0hFK&\7J CD gڈA$ %:Bt՜)xS5NTu }+# m{JXA+hť}9V"XARp+S\UZRJWdmo UW}*U+•R$h9{WU\>tR:5++zbWU\#ez5pej5Gp+jo*}*ૄ+\lgKmގnr3o!ft.;&L4B@ џ:u|rvi<əF7(c#o$w`59ڛOo](t]tw Ui;LӯH=+؈*VWԖT;\W@fF-QuO`) $+`A'K'x/8)mȠ|\z:Gp*WUڗ"WՖp& {WU\7p:}*^!\I넆=+X" \Uq:;pU$5++eZ9su9>m36х0<6$@ηr< x=Ng?\ ,{0-J/'5i¯i#Ϟ@]\Q| Ֆ}5]_󚏋W/LP/?oDRJF(D|P6:A1oV/w/8ΗƼljwsz'#upi~2e]ݠ( _*M;KB)g燙n?䇴| ,IP`bz#:xMUC:vqzo(]hT_]2O&*cm"D%%}ZsRQ7^ +jT!&]1Arv L[Y[FlGϷVV%[@$( xa֓u€I9ql!.`De]%$0cHH!u.$N!ar2t\h3q\hr=z[Lph!<WyM AZu6s79̈́߇74/TӼz:1CgG|*5{%k{%T{%TUORj=l -!^]C׾vΝV}Qfچl1o4p5{jy~v>'X:VFJ}VX;@UF2Ř,<%+ERګv^16ܶHO=3|{us֛?<7߼5*"= 4A !"#&YV"e0NwN6PuO yZ/vᘗ h0igf]'󜇤 <)۱.|{IAD\X$;Ys%؉,d ,|'mv}Jrl|W`x>M-<:VO<]uuufOmv04rsٻ+~67LOד Ҫq g>|x_aŕvw#n7nɖ )6/R+-Gޥtp^ȥb'3hV" >1ۜInbǰ1ց;%YM)Tx+m0k(M~j[V#Td>iVE&6k(rtȌxTl>8ҙNrB #U\}:)Fx%'Y7D r&|Sss=*8w~|"6vgZ'\LE֣F׈q2VT(D)U̐ mօb)3r:Iz'uzuc3c^ h؇om$L4'a,-w}!Ρ鞻^x_[+uTZU7oDm>-)>, ]͙N62:bON"ɇDV?J"1h% eEclr)W)&v,Q`!,\T &e/0Ò$ձwڙ8K4SĕJ̳J2f-__Z~蛱):FLR%rle^Bt!LF'}ED`@t hs*ɒN1 bSJxM)2Hg<@nXd,Dv3Pe$JBЬj9yJ@a/²٤HXWiz˜hveOƗGۺd HK1ڰђ>R,QB噆6bPB u՜)xS5NT1c]Fͳ(ȭXmΪpflH!%==!0B9i5(0w/37QN'ʮ!_Bb0i9cyyqTj!@~F+Ers^GD)X,QIIŀ"DZW"|2:TN9LJʙ4F*ر֙8_gq+z̞N.5{d@Bq SD0 'KQRRA2^-\ ]$2K ^k`-)`)lW4_XflPs9=_h,b-T|D9 [KrR/NNut/)!OsIlH6?!8Gi( VPBkp\UQ`c=C:3\mx=!B@R݄ٸf.?; 1nt֢QB<(g%Wۧ y'?dy?@$ s50mU!0K'̶5 @$eTl%~}+9nxym+ZK%N:0KDNbbɱ0D) >h4HtvlAH&PÙTEU)J22LFjV[{ky>Γ<+>;$:7և!)r^e^w#@ {3=o[V^]0zQwPR)=_jX'~}x2{{g'?o?|_w}Oر}wףw?:̱$||J?AnGx5vugJȕ2Ч`>ec7(hF;=)vđFr,wEvǪbڔu7{WCޯ⽍B; Y@1ӿú=;7egU q#H6.˶T˰ݡU)"\B 
J697mř_Uc3yf=6$Q,5 D4&iQ)QMwH.io/ӨGqu?n2jxsa"Z2dXn3|?YL4/7|IIB#7ys߯N/A ~;A.񯾞ȥ/?>Zv`n[FXq.2E8R2ukYȃo='g uBc yCpDgY []r涭NbZ^-xoyt`Cvq ޶4ƂAYw]s`6C4|fv=- ?enM|R: 7>O Rax;zˉ(q}{}43NXPIV^ YYMB怉.,O/Ϲ:<( M[X00.jJ& B9I)iQc%UR^i-"BWoJdY>zO]`pbGnO>zi4b! ġ;ҹ Dn:Y)ZV"%G9SD*ι Θ*Fu)&%~O={z.9@sJ(`ZbPG߂6*b"AdTS9o|^q}۽Fӣ"_ho9^|IZT({pjMK;;Ǜ/%<ɹlei]5>'mHDo;~C҄1#F)K;W֩22FQ/[RPsVTY.+3&^2!qrTkdP!)Wg)rvr 0O'Yڔ)lqٮN/v8)r^kOYb*[ƍ0vauuFt -\*S \eUUb[MNJztE)8JVؠX\3:րDCESJ5X$CG5P21C7,rR!Db:Q"u eEJi,#gOr+dlzeBljI{m$BL%O(OR$@&sBAeU(TQ@h-cf\rcU^F51B-F &6RV~8vM ᫇ڀ-'J;z[*yxƳ&;w>*YCWrl s^ٚ%p(*Q *(O,z=CO.(gXf1y I,HJȊ"2:E|'9 2&&NBPߋzw4FՉ*(%XXNV)8Xh cX(mv%$7s :t&'mPhq4g/e.J#`R@=BLɉDbZtK(PPۏװnu u9ȭՓfeAh{׹~dߐYnƟ[Wmt.lGC}6e-租wmzyxw]=z^irbnO>Óh4[?n9_+D*\?WYUg:2fmCDI~F_ j0ڦ~`螑g耘3!ψ}MК)aOZR>_fbid?J=!TJJ@LTW*i@BB!z~rd4:tCYFDQp\M Pǫn.FP\j04LCgT5-ra Z e4~;!P 1@NOx>*:!\qP$1Snj]kp9qM+r>[W?5ywfW(Nc1"ϋڱ0bKl꒛Ns>Sr{Uv@\\w{CGtɼ#imQvlɵ-^WkEǖn>ޅ$#{S|O}5э7yCeſnm?'@+#2p(-(V~<[&F$t*kۭ xsZ65<0I]-/E y ٗ)T踬w<]7Q3\x.ULQ-@9:S^Rϼ1"8Don.zC:j]_MRk% q,E)DŽmO#ß(2h5ҝ}m4aj&a2m୕26=?p|XN&oNQgW~p*ůp9~~B\esF8~a*}XjAR/yg\ ep9!Yg͖}E6a'gmPɫ./4x! ? ![B7m7Ovjn{ I`T -u6x^j~λ&y5;hs3n _0/RqݥyH{l{'JʛtAwq+!@n-,Hr@͗88tld:yk$ZcOKcMݻ-zfŇfw5|M7`!]8ЌכVt!N]WJ)i u![iHW4 nlEWJkyR{ϊZt5]ā[ҕvt\1Ԋuvɮ]M&{p]t(\au56D:FQ O,EWmzԐ<5+ ])헖uJ&Zt5C]9͓ ~L$Jq+ Bw쇊h]5gֽzm/5>]\:^Zyhqȩ]Vteg:uv&˝rd3tƔ%bH% fܥ kJ0.⛙b+nlfL~C))-SNK6t5ft*J)y5!])k'R\n&RZSוREWsUBݟ1'XOvcʝLjI&Ѵf4!M]JI5-BQkIfVDAM]WdZVD稫(sv JqYU0IR[t5C]% UAftL>Rʸ\Jvlz96K pu5PQ&2BWMOK J%4+ō]Vx ue7+O:ߊ'])f+t`ILɶ+u4u])%/u_hv#K`afTK+;Ȏߔ+{)2*L3aTlʽG,6$9oi1eӲP{EqbL3)6pVJSlPl{Sl-]]בiEWJ&ު.B6ʺN\8'&6Nl*LlFӊKͬ3+:R%% M`Q\ܐ6j1XjJL0 ε3 .ftȓ?gâFWqǦx;[F'Ӻ}n mx(J~Z#t]=ɤ8GNѕz׊Vu-,YLC0?=RҕⲴ+ 2u])eEW3ԕ#zݟX])nn}qr]PWL&lq"JqS3@k|qS{Ǣ')Tv`9\+F SוRNDWr4!])pCR\ ɘJ)iYȽE]͏ '^%(Ŵj-Vs iZS;n2M+/ix|KYvkM3`4u])[.s_|<ݫcOdW 8@" d]}=J;6}:6.><Kή*G9*UZtЦ'6ç+>p}3R+~RDWQftζ+=-(-{C-J};ٕԊ6uI]PW̉}lHW й+zLv$SוRڰj 6;0`ҎIha[5\F!cԑf6 W0qmAb3۞̻E&SlTsz^vkqQ,Vm'%+ M8f؊+ͬ*m!fbq0:ېX9#33y])%-뭳U}&v]S\͂O-n+CpyZZ{ocQz꬜yn;Y][ꃂ}wwvW@-)+>B%wN8FM&6ѲP|.omsGC8.__V|V|qzA*9[Ҧ܉'Yn;Q =NܫҬŷ-yMo{0>2޽^rPAE"yGj8âo|@|?sLc}@#?B@A;Ͽ%JAwUץ"\c/W?uև>rb>BtTlOO+.P~s_(7|WWw6ޣe-?u ~_뛫..ק`1P8nӧ;[$}=u;&a*>EzK}3LIqzݞ}8;8X1%t[Bz2쇅 .RX` $ME|g$=y%BVZNI+v"Bm4j-h%חH\ɢ%@n( }\|a+-6 d 9Rm4 [d; dG&p򕸫 )O"½+uXB 8Xьs9$ԍ#cԘw R$k@LD~sGq-[LE.  ?}qlDT0AߥȟAc+ʻ C!A:2 ߚX  %IQ#>S$~_t?g]-L6{9@֩Gd;bsʡ:-'"!ݽWeS-] RLQ ;-F_qj0VW= 2)#LӄlAG2}BVQ%b:\L# &W s,vL’H oFj*Ȅv% }@0fj05gȸ"3O|,[[@L iȢ}/iC`ٚL=`{[ t+,,<PM;&ɥ^Y*`2WpI!0y\t% A4a=>* 6`drip:Y AܛK̘:{o 2J q9eq[a1A(f0eC?`´R!()u~3 =b;Fu!)ɐ"Zj.G=F0'*2RH/q#p0rvΆ$Fy?{Ƒe aMK~fY.6$_f #iR#R==6%Q-vxDSUνNVQ=|Z< \ Iy0|M=M+KQDD8'2&V/2vNR"'Dh>`V9m:,|OEɣZnuq;xtXdu,©^_<U#lΊjl+ɒA^VN"}~=Lex?Hև6ХLU-FM"M{F7`= ѥvw/ |-Cޅ\c:tIյ2I{<EpO A꜄ر, AB!KhwXCA[1Mp=vuJߵ"Н)M , c~7Xx9(I )HM.DŽ#<ZHo2"*jQADʝC!kQ2l]`rV')UK' U33߁^qI3[aV((#LjM[EEH̦zށ,+`~x.ڈVbЏQԠ[9U!6i1r[avGjic!*UҵsҚI2`2 D Ts.!]6yex9ЕNh5›-ւ L+[V}0E D +PxVp* -]Z ]BLօD~"q`3G É򠹧BSp2q -M\ГFnek*E!nކ`Am6f j;OH1`!ޱ4M07!#i!:.XN Wrh$xipHm,?t AzgJpFP ^mgr+`7|AhDߩSզèO:o n}AFduZ귞ۛW GeH`7|sbv&km/ge/_qxq{t~ћYq1~6_j~q~.?#/"s*pvV9̷{_/Fۗ:.Fd:u.FmgRZ^FPȿ@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X tJ fuY1%9%ZwJ t:E%#)X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@b%+X J V@:%Ч[a]nѫՔjoۛW?εr(n@{ pЃ."\Z)  ,\:ҿv=m-<.gUf+}Y6^dA41j=^ƚ/\w7MU5֔2m=jOk`"o-+Z͋L${ߎjzïtP?s?W$6 Ҷ1)&5"ԕ3Y=Ezvl:9oenۦSU5FDoMCPH}YI]ld}YvUlkL/Y[1cc٩ΰSȨ5u˨BY<=anzv[*Mט,b6H;iTkx:YsJj &&r0)6USlB#'b;#EKCW fڣ+B*tuBtbZ82l sa>SF;7X9 `+LpcBi, ta ) $`0"PFtutM~HB WU>NW%]#Rzu&,t ՞p_Ck^Ci,R{ЕbzjKe]á+kPоCyl#b*t5 p+E:"_:t}>](# ҕ^i1 ". 
nC+B_x|J-< r ԽCW׫ (LWHW޺OwS+H4lv5w߽-P߾JW^o0Z/ѸͲ#o5mt&_gthے(f-)`hgڣ_" [ithEWpXP&tLW'IW^GBPauDK+N]J iU'GE@+iPj^9Etnߐ6Wv0tEp ]ڗҶ\)UV){Vj8I05v(tEhBPFt+ȮgHy'/İ+/.Pn=< {ЕfzjK7F<8J="/İZ#ULW'HW*(#[7"mNW2j+;c_|j?/=վZ+z8p ثba+ ;CfÞv~ANAENQv:sdd4ïȇ_A{٢MxcO{a()6UGѣ$Svegm#Lڇ7WTa.B^KxSݦȗ޼7oSEf\_G^Țbb4䈻&@W#J]-}C?%ߕo?O; Fovy_z7_]j?gOx=Ei2Zvûo6KƟgm_L&߬?xxmA&_,n>ۻ<%[_%|7䇾mo^))_yVhe|L:"/ޘT,rq!8hm!zf-qbs)7t[ws ),_5͙h`t?IYwkM:۩ B|knKFt1UN}'uWduuUc0[}a8d/ i<7,y/0եrULd.Am lr7}u ߧ妜slimKv_ǟ?K|ykl_?L4ʿ>T ?բ/IW=y75W.{rwT0? ; Otwќn]t}>f_ lpg{_~RJVQ֝lvOyOwGwC6hahx 2Dg:G|)qj'ךt[/.|мf$@ս:הکNj.dVM^/N#:.IΤ|m3UW+Yy>*F76 o>ЉwL.ЍifGbrLh^foG}%r4S eEʽ=|!MH u(r#xʟ 9eŷR̩d]z=nuVċK#tRӏPm]#3@ N~sL>Ke2?J䅑yBo*7nsڏpd#MdjL-]Mu)UÙf2d]iѯfgJp1zЍ׳ɤ^glf=w!ү;C068DqUPuJrZl>XM0!C1 PF8gtFyc;S;-C[jtQ: Q5]e(eRD<,MX0=^-_#9_vRP{_&i9|s<*!ԚXQXuêi)Nf\U:$3a31011dVE(Zؼ4һwmI n;RCImrA`C ["L CU H5{ X8_UCp,cJQ.($IƂBTU:&rPȧ=D$CT{* %' c9+ks˯zH>m 6Vaz;E]LpC[gtߎ3슇sʥQeb%U8=I6QF*o * O7g<<^^|nǣUcdzjSSe.AQ+=(CPS15O''uOzIqK6R4ل$ H4񁪈p>D'MdJ zE1f3_8?΅Uh".CC(PԵe78 ٞȎRJ< ps!Bwqi0 '"֩ ydD뒊 %6&| \RB^?^W'qr>}? p^4 gsOu fn>xz9Ex0JyL:N} m(o0ΏBZ()T1iw4lJ?/URG ;~UQ5YxeBHM4GZtIF 둳:R%O÷#";]1gn:ZQ57_:}`U $ݰmxEv^o3[ iN)SS|7ěk:f"Y17#!jio|3lCn]ŐKܫr#\T1s喁Pj0oHZ CϗGh ėi-^ãʍly;5 .WLǻT02-BOg{꛻DWw#Oڢ!MWM7=mFOJ*.{_F Wt4ǿgaLBcr.>zw9-{~\L׍6lg77&f*Ҳ@[l5 3} >٤h&n3Ѽӭ{eDt=6zlipz|ƳHz!scި( '4Ck Hv3ƩJI- N_PGeg9`L #!;ÅR0EQ1-t2JSycDp܍i9WO~c]ܝƊl^N=t~o8Pv.Y-.9sz2}]q*=:m؜Mwə}05xeywoh2R7@c:Z0_ւ"к X8"sV40~7y,.F`0JX YCqsG%ޗwT+ltTE~w}Wo{uq4LS5>{ܑ‘Tü ;nS7Îj /:mF+VCTRLsR[^ (Z Q<Dq|kp Z:u)HB.mi{8y! =B4;؞ DKYDR4K:0`jiPI&P ڦQ'ܪ >0q ջW)auKL`1ϘUZǒUs"/:ʄ{u&]eUJFJ}Ʉ2j5"r,TЩ~Pۮ/d*MkTQO eݘ+U" rU>̔.Q"e$H vuy܎՞p칝hiw.tn<ĔQ$K*q<{hAq 2$܋CN)Q>piib$B ;\FIqʏ%D3r41`@\da.<3%ǀ+_{[g\y{K2ߔ ;:FdCAIhEܛ|!hEI-wA&2G"14X휉B<!6! D)oKg {Lm{:#R2各ŧ0&qH EJiڒ . 00E5JKL@0CX"!VRAQv4:!.\JJ*Z0:Be*aȍ @, ÿN_(/W;cQ;.硻anf\tgz 9 F1g-w䠓hK/srltt;x^;6oTsQFKk#ܗqbhrMS3S4٠+J%Qw!!I%pԗ"GA`&DX4i$NJF) ɗ8?G޴!T{.Ǔ/D6>P\Tǜt2 ?">1 Te\7K+ârg)c68f cT + J9.L|(^JgR6٬/~yU-o^ZU¥\ Un&i QPx6:/y|T 7IQ/O9?5gz?bf_;:Q C-e:$2u^`bfb'Z BShAc0E2DEǍhٻDpHJ0LvkL}3-eeDwΣLIJ.ഡ(8uƸ$d`xRNŐIRVIeZln, aZTP7X!5:{s"E5MT\f BK@cvNiquW1kIRs=vZ͹8h@"8$d5!VY%ZdsjG/HOuNd-B8KTLL!WˤܾȰlX2~~{@SuhCh:Vi 8'qY;&Fy_އi[]gJI(piUMuljP+.8́E&('j9MoϏЖ͵ٿF.(^@D9p 2'UمI8Qa:;Pbo*x[\J4+f[3^^><8^DηsM8l&Tn`I 3Bd*CDE}{> _+7wՅ.Ϛ.(vŜsi0JY.VvA6*̍K;7tLBnm>խ,{Cqpzƣl9{:+#:{ȶY[Y9u+ęT}OBj+b\Ybՠ{Uփ F" .ΐ˿ޟ|w>;'lOw_7\rU6IIL_o~jIM͍fjS7 żJt-Ty+v;k%@(~LYNuH[5h2z̾R-S3U߭Q2Pb7QIZ֖=MpKAԎ 2pD0%J* 2ghp}'갋֓ˡVCh} 8B 0֒"r ,{ڈ@d@1`(m4@ F" zhfjrmAC>-NYHS`3r^>޿;O r{jy_.z?wikK[wк `L ǫ/WJ쭡K9iy=wEy9$#tUsJE h% ),Nhޓ{f 0ꢠ9e:)`FHϓHYg@sT@߭$'jEsoM̕mI{k  o[)yCG)%2׃T+S/o>3+GT-MoyB_2`gvo8F;AqfTGep\HAH[GuE=a-#Ko$A0 :.{oJntX^HۿL.Jhis *=Ƚe-@ *ɭ5'hjMcX9\o:_bԀ*-c1]8*.e 8*o"Wl{uq478n\j܁|\[~C=[0s0{3tvs}eBf^\pYz]YelCziE:zWC<]Ϻ0Qufe⁕BXVR)Tj !n? gj{_;!Y709ɧC"[$3Ŗ,ɲeR{$uUMV=|X:P ƪD4x}lpO ñdA+D eƔS-P_ "9esY'MIwCL]΁1>lb >i୍1`FE+=1 cubXWpF ,׋mϴĴ굂.,2e?|/*+lc%wM97M=z?w@i&YN9u*$10 c9 x@& )cUJ=85¿ Rdi!ׯݾlC=ϺVq׳=+Y}|yLF Mim;#їly)Hc mI{€l9\YK$+3fO^6Rr\k!dPH18-x-',o*ֵYzGgqK~O:gy Rs!7^T[Y!y:.K1G&GO ?LƳD"MpZ|G6@ 8 vJ9*e~5Oܑs&\x"Qt )4(kLyޑ~4?\i~tmP"D0:bH$IHH $V"!J[Nɥdo-Jyo9b"r.GgW?b^M':ҏBq]syNY wP6!1C3H,1gLUh{:!V\d[o7Q"ГhA^kfBeC&vCMgo3!:&^Kc_*Vӓ*g%/x"0os4;hWA=|ږ'xz}+yUQ =W^E|%~v9qO%8KeX9 QTEFet3ʘ J9=" :qp@62Vg;2*հf슅2 Kʻͮ 3P`d~ #n0< ݢ?qf4| WvcX29iShR"` s,c"M,1mƺ{?姐8{.  
"sOMm29mN~SCKˆyPmXD@ʄS.pb#ljj\\eG_tFpʗ:V C=Qa}UVc_}T=}|U*HybLDz `R 3(gB8:X9pw8<CcQ PC/&(& sx΄oҶ !Q(r]#(' ݵF(}AuIny@%]Y0k P&]jZQq bbwav V8Ln͋ZCRdz(BcAPpH}n!\L^B9A#XQT/1ơNG1c@,T8#$7 n_77RK2EHmSK_ꓚ݌/C:"(r&D4eh9s~,ǣITsvw|0I,Aqo~z߬Gzh h'O1Kp^~.n.6####ˏ =ӯ7_E].C5ugXm>$LϮ*Ȇ$Y;kT5D\Xsvt^ƋqX ix$(g 07`cē"zc@-'#lcc4!L(@FA[6-@I m8;g مmL$j>tdzSvW c"m3Vd!2Y#()aswQ!{WMZ\İj<ܻUx AF1>05Ք[ڝj[/^4YuYj("Raߴklox/C )aPawxB \+,{3Q+ ڠED*r1 Q05aTkW4O3d_H (`-q52fq IJ fZƃ",ͧXZAZ/Cͬ" D!ݒ%aA șǹOy}mvfgͮ޲uhk8"]>" #*䌷mM[sH%%}F! v #=U&i'@AH#!8: _P'Ck4DtF Fu  mՆfHh b,Wk\qkܷFbD̈́56N_o|+g>"iL]t3$EDHȤEKR`bP3/o?`'p19/rq 1vGo3itN)`E d/u9I|FxxufΣxjڟz@2iއhefRs_]|zfSpMڶ}Tj--:A,~X.3oMXgOk"s=@U#rgJlN@ĉgc_荆` d6Yfכ߲᯿i/|=SF7V6u/_ܦ7Q4K&ERN:^맮".ן~O^5.Vˁ8xUѶ7$]3ϏZ*_Ç1OnDd2,5)$ٺ}ښOuۇԻ_L׍f1zV拏j 0Y\S*h~ҍ_ofSo2|?/,J3ǔ1dL{z/wR`LC>Ok>1H=*k~G|74Q R5͓nI|Zu72Ik sO~z}F$Fw?gxEƋTm72ߨq|HԈS=uKf>5y+l}Wӛͫ_OE|ud'5x);t?M+ۿ3Vkŏ~M,e}\X=}& ޿Ͷ|WXMt?u@՜>ezL:HI>~Iʥ|q$B&E> ǹp~+SZZg3E5!iböDrTuSU=aEgvx~Mo̻ݣ`ϊiiwqNG& ib` ۣgzY_/5|r~wBM_N^}߳`jWN܍, .3o֒Fs9HvMF{‚[Y|6_5]WdFR5kj8UĨb xN 5&pdNyT.sQeC)D&ia&s}=la ]ܟt!8g 'T-NJP}8G?Gx5#^3 AqBajnGӓ%\mʮY$k˅o<^H"x~[υߛ /O@*͉}+~) D _cQ?_7*JJ,|9etK[tATQ[ bEe '{ʲ\zYd$;&03{Y濾~GL^-qLRG,G^<;8kZ/ǂ;O 54÷]<;8^s ëK >OLR3L-TmwwxO/ߏ͛ ѠX z iș!׸UJW\LW7rTɩ{i9ZWRjuÛL%6x|$SZwpI_ANo}o.ܪV޹ƗnʦN1;ޟshsέn|JA-ߜUҼb HC œNi>ݳ >y?~r{hT| =@8m 8/tYn˲,9gx0.Q] n[ e>Uع;muVjtuls.N{bUO iMl3w'R[?Oi@tz'%|鸔/RtX4k!\*Jޱ 2pţ9$TO 4fͲR>I_x-Ca6ȣqpsT4D@ В LOM=Q}O-$9&N44xafܱېͱ۰0#5]U.GIUTp6qk#C D{^ﭺRŃ7(A01kFU/B0K*kthr*_㺁ީh./{).F$E[![7j86A)̈t*PM4 }$.&Y =Q @+6a;MVkS%Ĥb/dP:G4 Tebh*[P>:G1\BgtFhMNL Cm->4WC33Pv,|WJgMdwY _{ !r!=7t `7-mq"0N $U9\P:jqu& J4оx,c*PLԬXIfaw5/ Ja+-0QQRN j~Sy $a.Z9K:AC=-Ďʥ=t/)iPZީf͝EYjCJHd͢e䘙2b!.,I 9?nи/ 56WJॽ`WI РZ`I¾DHDAL!"d…;10AHm/$? 714bϮeU@xsYm0 &C2jm[D2E` ̒"0S0r4Pń,&T m~BN[iDV*2n*.ۖL*M(-A42IlINd%Ӱ :C8Cf-R.MC" QhX bBdd>P1S_`Jqm `7E|0فѳU3L9֦,y,W1YH)A8 :hJf&j[:a q!PRM3gɀDeeeҫD8pq%B,f$FĘ<a g7ʲ3+YNIv"R}g_>{+F#o'yWi br4|^2N%w%b]l4tBF#F# B(qWGrdKWik8cѓ@TCw0\\ ay>KZO+m+mRIٔYjĩB 1 H`TT0*L$V]H|{q3]8ԏOqt=d:ӞR>_5dՖZpFCKa'n2a~{{ %. [nN!lMqkJ@hdHJ5Csm](FiC( d:jw霆, xVCޮ12\9=ҲA+Jg n˨(B:tIww]l0Au9Tjxr5ge&Wr+]g'Efs]|NGtI|dz{!Lnx46-2xA>l>}\sC:~q rSvM!k7Ce?G}]ίrpUiی:૩qc$,4i.Fǽp3Ўh #,]StglgGuHo>-oB|WApi!1VIIP]> vXE"L[]CI%Jo[ 7W\뻱R5μGNpnh.v_/Un K Y6~vӡ!ћ=4TH#؞rhiy%GS*9d,pcPDjD*HĢ ۔JvS !8Y.QNhgz f2b6XN(O-1<녙gnK.F1k%'N1c ST"ɔYI'zf-^zf-st{k1_=_^m" PoȦ;BPi#,d`O%ap΃>QnNm"Iy.-K$}I M-tY~>;{iӧZ=^Ve6;Sr%ɹt) 9 4ICW* EmxD0Ub2I$ )F0ls>rVM ɌD@YŜVlz!ƒ:ME5D}B`& *ۤ0D͝\{3..%n)ᒓ>"fY{ݿn3}f=_6C1eZ>oѺò< } frF­5UfKJT͛sax5 \)XۼZ4!JҞ2-8WN33}]ۨFP7[Vػ}Tf1 jbB b2I/Q@"!Ta6' v^!(z˼u?tgqnZ NJMOyl9- %\e5WLcZB%ôD,[krl9Xn9:-mT2aJدdӡȊ-laoÿ^#PkhQ_gMmgx]lvѻ;8\άz񬱃%~=YoGדUZry{vivjV̥nϬ(kwircr*! hҔ=uźYJCn}1h:}ź}"Ӆimۺ?yuۄUtIZV714k幥:eKat}4.wmXhe.e/ɇtv;4Mv4 ʒ+nN{@J2%Q(DdRע玓 JsZK9-vF&JbKbL >{+04&\R]*!'Xc'dH̚j[*1v#N:a&5)jުЂv̪ IV xz>ƒVWhIK J#F}^<.alVzgu v<)fVxKe`cAw9v+m `Y5fn'b&BWVeL^uKXQ Xz<(%#dA7P<.A /goz7oAQ̣6Q .pY1έw1OλwomߙǼ d WWJD "%SxC4Q]+\W]/ϷӼ^տē1{&IБ'0Œ%XI1G昷~ B+Vz`.3r' ֬5>Yf0nj +x*dY e b|h%8kR7dجdK{͌&u?-AҾ; NZS% ֜RԒPHȿmOtOn۵ XoI|h5TK\#9o  6 VJcs"O3_5e ޗ5}5EٕH.4\7`ap ! 
Zw'UL-3 'vNj)څΎt3֍ ƵAOlh:$"[SݺuJH4hb+jigs$ RQ#zq”@NGp2"wH -N$1^8_sNܐKOv| g_{c¡ $f?m$NqyCۼb4I&~H5x_Gi„8dݚdK_K:\σ 7ŃagzSZɱs&ҚGg*"޾ F{ bttJ"D;t Ɯ1z(>$눙TEJiqd1 bWEBkN̳{ۯσ8|І9NQ옳XjP⤘$ٯ:sUcEP2b(N \¨DQcҨ|~4wx2F}/]g5G?#]SqHCW@R1ata: Pm%C^uآ$hw*;rs"N~'[qAoQ i +VDw/1Fwse,9GAoIG?Ţ- qRuZIYe0J.QpK+عB[qLP笼B6%x+)ָ_ JL /НA) )M%DEt,J>Z Wt£P^TWc*T#ٚO>^P wMSR }]Q+Kkӯ|:/5i8?-;vsuYiZHz`(犖ycBH瞣Ci\8T@vN\k@K*㋎RRNj9 P͍K'GHrR5zPT/ZC;Q:Χ3v ojmzp2] Y!b­ S矦?pr=o~nP'SdзnQ&SZn$|a|as8,QDI,h2HJp$NS`N0xτJm\Ll1R*I;!yy;Ɲt>d뀆Nqlueٙ޻¤~R{Ř+7l7[.z/SO?{TO2'8ņrcMlIyjt``XfTؔm݉jmw[8@+1݅ƇiAϛ=oAQ0$li%T*Mإ#)Ls!0Dq݉jqWCm}9]R7_$cϾۻ W^9F4|rg>.oW5eB. AO41" S,FX;\&:0B! Ka5ГP_}\xX`L`'.ۻ7Ai@LD&CbIJӄ8N9jAbI5 (N5!ĤEPo(qoP(ک,@A ](=A%=Snmk&cq}~^HWŹ/XT9Q,OX T.In '^]z"@i`ћyvz;~M_ȟ@bfOwo[Ox8ÿGxC2FD.>K?*<q;vG?@5 ""B8* nO3F @|s(A&[LvͰi8=e#@Zŷ;o]Pȫx<DzA%hL`'],<\1xy Fb%9Cgןg7D:fMs(NXbSWt J62$.bJh"@f1汻p0%kt`zf*/ [n/\|>To:a7W` jB1Jj IA'#l[c XwMBwSj .@\´3ѯtvU@ԍ?<:s|!hFx=UpJ&2O3%\M? UH6Gw7py65&-^a&IĎI#a#M'92>-.ٵ޹2'+ƿw]ƏȎ ;Qc޹u~zЭ}oeg뷮 8EW#$麭N3fG燪>p {0zv ؛Ԡ!O*2g41qDʼݩKM DcI2KilƚkZBІ&ÛШ)*+-m{-D"9E2ݻE&XD^Me*^|;pƧﳟ=Iśg3ܻ̓pnÒEH2!gNvd[ I2!ɒ}wWXLvDY8SP0Z(N?S07KU U~Eo_MZC%O2ރ{OW/wxX?~pN&OЌK7|x4\ğrV ٛ.n|Uw\y0wW12$u]cF&q DxuZA bk׉&^{Wf `=}rAy)BMV/uĘ s8VoN k}+ORl[t^C_R)E;d.P+i;Cr#lN+@cP4'wE!Rpؾ(sjD?pW9_=&ga@^.R@h A 3EmvR`[ 6O4Ra| c:_={~BR˔q㔥>˦*iJKXBUطJjbD_֜|g)LO 1z*dqH*5UL.ԹN sCoyB':؟ WW9^Efjn3;1,(ɥ¼Eph_C.F]T璙kQ\wW2xsնN)]@t/{p4a4 ΢8UtVc]ij9: x=8dR=xfV6_ 0NW`d`N8Q^*#\~D3%aAR%aGY(Qn Hð<shC$be{PK_TƺH%oPJ"A9z$ޏ ]0"WIyo:0JgP GcRdVrdO3_=?sǛf%lZoVzJ$&99rNrV#Y |a8}q_?>#߫67-l-碋-c-2Y!eʰ#G7Sпh6\ײ ^u,dC%d?Ү0k5u1*8xo=z|7z\6h;=WD!bފgU<]wBF1jh=ss/6߸vƶ,rtNyK)#L3.S9Juq,cn|>ۀr-\+ŷ*NK861LTQfcs-ֱTC1\:HkE@]n[3ƒ-[qz_ j|a~2*^wK( u3w{\ iV'լ6cA^mirJr׷LrXޓq,W} F0/fFklGRv!%pfxXL X&GUuuV ox G{k `Hdm!}zYPeKW;{?4!ˆc/-!"uۙe-/N7(6ћ#܂L)OAO9dȓU^lXnBV"Oؘ N$Q̿ gio>k0U?FYR uqD\`bc-0[ΌB3"L"ۙFҦ>PYy>QݚB ?k(~Fz3Ke,zsrL;KbAcO-*c ]1E֤H\Xuo_L^- o_%DTs% JtHCA/0ذLp H3"Avl̸Ɨt=1䶓,/nF`bgovfl_ZJS;@2ƥmKo L ;N RgҢ"\2|kyoQNw\b7W~ݶ>$%o}CGߪ~M* u_'au "UsOB~]I/ ihcq(WZI|=9i$sP_JqY0kLLzޏ*>ـʷ0 vFť_t`۲? jݪNYsvv㨮JƓ4m<96Y"5pA?GXCAR1''BJ LNE1ߏ~$͕V'( NVJ iSU4A AV'ONjPIMߛItŭͩ%J.[CA%TnQjm"MO?KMUhMc.>_,kgHK̴Ò /}L h ?طsP I86D]%T?qRݧsjB%kO(C 7)퇚jFE9n"͍aIR4&w4PKG#ߙ\ces-r4< y6ZdYƫcҟMDW{7 %9{7πlA_~z3KZťrk+ů~܂jn_^r +0lƓu>M^^8+zQLDs7C<[HQ6s$)p,JMKxRL@}(tOgZ\2/=irTr̰j;#BL:jJuU ûwJUFߤ5k ً%WWisJ%6w~.ؔѡyh?^:G?*Ht֊X"߅Ix)@SN]@QuwD*1W={% /'_fW3JӜLBr`9mYc9&*e6FL@J%Imϋ,ā.E.d75\Jt1OvBd\c 2s e[X#.PXʺOS;$-rXMIBA$|3|^}2H^M`X;]Cqeo\ 8@Ŗl^:eEҫEb}E&`11%b]o|e,j/n5bLvu&B1u7ZR.y`ζLb'Dj2@gN߈Bji4Hk6K魍O1Bj`Y9NeKTx2. F0wys l*JM5꿊JOF A'~yi)Ħ2eygXO0@?O/nn0װktyp>c3M}o" m|!4=Lb>Z];e$JGEo[DWsTOS({4ewbŝe,`Y/uf7:yVv3`:iSz¡ QsD e:FҀVb8,pH0:kF XuO؇t[/:[8[)H[6л2deUx-;[h )ݞ:Vق]譅ྸafu Ѫ &2eMv b;hÊ>3T68|ӭ5BF@Y #i7ņruL4KXx]D$ >#Dga|;btu1ѓ_EwpjșMRnAc.]kΦM#M#ZҚTtsr9 trvp[* ҈m8w޻ `IȣϾzQF8TM7Y]R|y썓@!EygցT=0$ ʋe0^ݽʪ9Z聈8xNӕܣL) u* `dcCٖ5Y0e"Ce]-xQ@1*98I'i|VR.VS5%0A7M1 ;!~4@-k|TD%J'!I_Jiʢ!؜RjV!zVAd&8/~,~x+C!TF8, VNxY.b5)qNse0}.Ԩ`ga/z wng6~8(a\0>c4͹Dl+خ^udTªOg݅g#ي_cL:9n˥d@'"l;hD5r,j<7b)*Og3244l5"j#EDJ1= QѮ[O]Nq}q0#Hyf ,,MN.DvP̀/f+SHdo\E,4b>NG܋8eU((,i L )5]ec{F3JΘ*dܺSALVyHc݄7n<+k8&jSٲU{_.L{nQqW!_wUot7-9ȕK c܏ý+MGl lE!E+W&HI!?1~3 n#:ץߗ}5$ x v0c9J:V #~u㜨?_zjuIue_S=tgG~ҹws+V)/sQ6=IiÙrnh:C >Akwq0|k0WwGXy0 r_g%m0L$= 2hm`\ Fl?z~l@Q4M)Pm%ڃOpVI" vmzW͔g"ه2-/6;,^*|LG4!(9|`[;Aq:Є#{-Pa&7b `?10؏~50X-6JSF!.ir"  r MjT t9:`@T^N-NN@"˨Ki{:g2#XXr TQ$#**mu 9F# C]z,{)S:RPJ2FI$sk,{M#DN<*E=T/c0XA}[gp"+rA@m γk3+圥#dgICy76A+;_(0IaW <zQ^/BKUEx)򘗸G&%.LTb%V]pFD(؛Tcd /)AȱsN695+u&_n,/@2X>rX$F"u[Lm$y`XB #0~s`$Le L AqSLZcF(ZS5 j. 
Ab{C"؟u#D=OEޢ1q/.=L1R?u'Vxq`Ŕ^Q]?tʠ1~G{z i;1])Bb;1=ߡOsV';4(Ё#w zvp4[TFC"IվVN*?LO'M$) kI Xtj15Dp)a*T.^eoʙB;n~9XmW1\S[ Ƹq6X8 4#[׆x[!՗ 510PNI@2i"7~_K(̚3>9C֒O_S{ȸo/)77ͧq-3>JKp)3uqn_^*פĿWw,Vzq7ӎRZi$"HI]`@FYR՘˚NVce֬ 54MAU9=4%DQvpzZ(hb&Df"lIR")NGxS?uZќu{]j'\56נv.e#$ Ex z7ztbnF)k:Eb@&s& Վw`3AA#<3> gMoi0@B9)U^x JYAIYiI'h)(dn>R K(;E)F({T7H$+.G3כQCOp.bb*_9#{[<%O{玸E7,(g׈kQk5S\tm藾 gt4ZMN tSXCis!d0QN%QtV&1ngyBs>¯)-u% ԍ4.ό$ؘ"wO0lǯ?&9P%Իѣ ߁ me#U)z7z}Sr*uuݛgo@۴f@F֓zDW-^$ x"Sr;&|T2˧XkQ]CIݑ]zU#5b^FLghZAҕ՚4xŽځXԣ ٙiUݯSOR TuN2x$TdGEi(Q#$wGۧqV{/iOQF C +0o%7?l;Kp6٫XcLehؑx{Nq4le,N&ʹ(dz-y^]FHBP6'Wi mĵ|vV<5 (),PZr51$gUJK/, &C3U9FR!t%`(F40m&vdq9LI4(Sd.gquO=y]f5V:8Kq.tD XО(%``и{DMǼ38BhX^]v5h@9dE!2j0T>6k0Aᇬ;xPH*/Ɛv'E%+\K=SsSH\3ʴ5լL)O+a%`PIt"r4Q :J`r@ qNrNV%idjsC ; \7un 1MV!Iw>BNTr𼌾jN=j~Z+.L-,O_I/OތۧQ*Eç`?wC~jht;*5HpAD웇|q68><|U`ADTWWpf³?7w?vv#~*Rޭ,;||q.@w,30m /qC^[*{g%6q])O[+)zffG^]GiS$fqD_Ž$➺)dnwOH9S"ό( "tRtR(YŃ/p!{Ť(~AAkA2wHdKczeB/֋yȣylt2k~9W /fyO\UW'_M_(^D.>/4MW$Svnws{Snqh" +}?a.iɥoؚEX2XWg}$y8 IX Iǰ}l!Q붦QXV>#TR?BtA[%xIrcG@KBVFkm|G+ƘS1*?*o,b G!0ca QZ5JIrF;c0!\T$WZѱjۏPZOWKydiexRZIHQ,;\p똆pmyTjtul!.*ֈ[Thu%-Yr̽s-ZR_m&ܡk٫M[l&ҵs *IMMrkLUA117*vx k9sPۉaV02=e{{1}JEbhB̚5mFQ7f__vtVϯ/g6^W3>yg Z߬~Bn2feGmKڷ?w?qcq1a|k&Q̿[]lw#FnjסRU88'FW v6ՎwSdn|&=f]HGE5hq1j+:\w,ɋ`˧{Sn5_t=peۑ2x!Tc-{ ` rJ{+jAѼ+ [0mC+[R݀kYC^]>}ΔOR}X5Qlbi g+pNtS83*wOZ#GG?_)a.oޜl=Pq|\ۋ oNS\5\/z߹^*V)6H A6"S ݱ>k<4樃L7ToSV2.[K6bιħ ܲ׸z@ѬYJ&(y\Ku c8zX902EobJ䵓rEMH*(.Xk6Zlu}އjǣH@IyCJAvQ>\DZZ%c;0)2r$4/:Bк,>;qDi3 | 5V>M)@TxRCmn/ 6oE] (+ wo0HA C_tK9t)S"y;>y,NvS}}h5EN;v9ih5UGǨӚI 90 J[$XOXpRZ9jr_)%/ﷲc7CIn쉧 j+ql1"1ϵ6: Ԃw&l$@Fnex"^EM7C^\ʛs񜕐 PksDߎ`=ue`T;}<>DLvvys|X{j=qW wgwm=nH~ì*ފd L:NvmǞounI-sti+ KKd,_= 7?)>t =HEhfK2}!ڢ:1cFE:;[/yҶ5u%8jnpS#[54"Qmb_^,]|vk9⋶Q ni#! M2J4T1]wa! *ٿvܖ÷2r2CS_Z>>|}MJ8 kҝxHXM0-7ޗEMcMK,W,o޽}13MTjhO<%gwI TsE d!I %kjwGxkPƱS#Bٸ;ܵ[:늄#XAjJcrE PKE(WsvC$2O.uh{rHmD~ OYLhދgYkdʖOR6iPBVP&EDX͕9h_xV dܥRĮD*B6SeñhD2w ! (QlRUx:@Oj[X+ { ~1cwtLH_^&}.qlX jɺvҺ@{] O& ;=.URrt4ր/ VвATGWJ0l#mC]hlTE[:9?Ud@'b$']62:VS]jr5 eqPT;tDmtp=XѲhc.,F% $ylSyCcL(=I nO'mue!lzH9{< 8J:5%'lٻ ^XÛn'~JZzk}r'32088Fw&X3d+}6!{ HV~C9~~0T'hk?F(m&'eG훒fFo{wjkh i{= r&+vo?w'c$|OуcɁj\"K?֣u<s< O&N$qxgI3'wB VJ.k)1QgξZԺ0PvS"R^ aT?<칊te;pD6U?Opӷ@˯;] ĕ"/բp:M -OO}m n>$^nKLdfg糛+U Rd#XqDjhƐP dZFO/-D7]+Omhu&#{˃ FwAz8"!b<4j6D R㽢n Dd"]K ]>_ϟ ~`2v_Bf!foq2ΌSC4cuRC|-dj rfbHd{`Q,3Pp&I'H0 c+}za/a|>\?z%mˆ9NS8$%Bl43A,"Yҩd"v{td2C2.i!Т/_$ sD3Y@'O>(zūY0xnRJL}n[BބqӒnxHs躞yqg{>yw׿}o!4=w\$2Vz޵Rƛ8~D ejw!, kԉrözr@k{o2QlK$v]{f 4 b²gQ?VR^+Ok:)h:wT$yv6JΓg)̢$ oʸ:IZTld6q‘= ]]˶ s_##8CЏ&c%E mI y~ wdьDo9h"0a5! 
q K`C24Izי6좣緔ʻ4jvNҾ;VQ+C0e_,/r+N_'ڝRְ|8X&1]F½IA`0P@{ҾJĔg6X-T~:$$36@?~Kaeaz7܉ JU7."?ɋF4WS1F=>bZwGN_H`^Q~7Mb0 J۳5nu4yُ$!)6r'*>w7Ɵpn$G8,$gn89G-˗2GsοF_9}VCG4Og6N z{3MO fLQLd4!YKY*T\$ʜF*e[q fL/FZԏ1, C1zah`)Bzv'D29TTu#<#w")Q @yNfsNFkc` QUIrK+][[򍣯LɁ٫k TT W3QT=$(``·Ƿs7HbRD'lJѡ09(|PdiAA`{w8̠s}6J>JB ){cpVG߅Z%W,:L t!8@ScmdI|dY6/QٕD$n9݁MqaaG bgKh>}_ L~O:ܳſt7F3 ;17afP+0{"F3MHn޾;{' Il.933Fļ2T*b},\E+,McI>yi/[bUU!\M5y:<>$ދXɜ6q,Nl?~@6kMidbOk% QBS昽Aq" GmGeC^N[ 斋c*g)]n:qf- dAK$ؠ2:Ǫ]g"Ε]J}akkEaU4פ_kW3c]#Et%/75>؃kBc-ov -@φd`tqN:-Jκ y_3t:}^҂y[W$L@F+٥"D$TPLAks.NU{w%f`A%{ f2eRr&e&[5ۤ-Eٻ޶mW쓃ҙ%@:(Ӄ4}F@[r%qrJDI4 6575*͎cTdBX "1הi@,KnӚ41i_9ߝ;Vh\LだvPrØ,qJR(2 ELu!$Yex*Ed N8ElV%M=G/ S8A'䐠;lob )fDfg25*asyy+QUyYu4F2'8(atY{VFk9F3k9he; ;1!eH;c*'R-u;o|-/7l u,o^Y,߼~ 4oo7]vP' abc#Lg|E,]>4i ȼP[j /y$q-Wa|y{85Av;>a*#Iq\FbFuHx-^Yía♙6Zr FW`jmLwHsA5UKs!UnA %f FgIaF:mcy2g*Υ*zZnuD`J(0nC\= sUbǒ2W$61<܍[(A;Koz_^j'|>.\20_܅lu> |\om6!̻VDo@h.fǴ2mk /%[E5; cTVҰopbngx#L,e'C9Q RYZ .\3 6?/$M=j4}zh [=4@mbw]3ڭۊ]o$\Fڝ9veOu:xXDLE@n"VBHkw4jvb3l'=ۘ|jmu}Ip~hӻ'r+U=ַ￿c V| d =AϮ_'wy>,lZ:8 q \loɾ%\>\'E?DZZB56BZ(^FvYw:02{\uv@FYwqJꇘΣsC /#sz("V?qZ j&Vd@)wRrfsi)Z಴-2&0okWKN$fN5ͩC>x_^;X2fni^)[Riun4 sl/P"B":B]B! `l p+4'UgT FV ]ħΔV3xmD;T9eʁ㇖Zrb{eF*3n_kh>qѵ>-6zz!i3$mcuǪ(7|A\<:%2cq_ p(ܜG>iۏ͖;|j^a¡wF'vPp0aPʃ ${뿼9<|WqW~WxbB[גAk7b|S$xX0|,Vzd>3>d_M(J#BE(NTd$e,>hSx~lM߼~P%cL!͑νa18i4)W`LBdeHjh#{ -Ȱy^-wzBΣs\Kvvcșiu q^ӕZH5&;5aE$U`꜊,P.d}fY"#2ۗQտQ?|'0$0o憐"0,q{l|n6fZVsą%$׋;}wav_͞2'wfu>C9N(; E;RPJ("^W]ݫqE\R*p˪`{ttu98JU 66@]f(!}RTw fDu:= >zD֚36T:[=H?}+))eKrԽ'(HU?|T'c=RX!}l Q, ΣӡP};]"؁--ԏB9KY_\bʒ0n0Np4{$|f# =*n.9kyIjKAZ$+?Kg(MF$wEG[I/^oUufѠ2*\;q}tM( 'wt?f۝ZLm} ,$^tS? M?MZ D ݙSDL>^/1,:;pc .!bD$%pSȽ5ӰZg qgfIdCZW,Q:m9Gɗ;/qh8+"2+'YB )S6_+F0 5Oy>'2T@vlP=V#\m?0 'ۓB8WNiv;⽉:#rۯg` VSO~ƷIF)°Sm^0mg#@gU"UO5/ocB'o8Cv0}z)#- "hMJIl$\Ip[mƙ<b,1~E_q^ [ finzxzegQt2LU(y)rJ7r6 %^r ==CV绾'Jo}NS`'q_^Ee)oY#ƟQ@h`PJK[?|ڷWk`L3U׭HT3vS*{u>yaC`E޼c材1$`@CcRx$#UGe:JlHiB49ۼV=nf hɵ'X#8;ƃ Z bt7z 9u8]sRЈ s#LQ 2,KU|cA;WJ/=U9=r#x-UgA r9T+CcXvz}rIŷ(XrJ(\% )C8<p~4pˡ$ᠬ`tKY2Vqt7sh$1ܔz%]fPcY˞G׵B0AkЄ[7~"%x'ο^( ~Ggkgw:{v'X=\f-Oa_`\eeVONfHIxߠs<;E; 3)qm}cרC!@9ٜx. F0ID FɈudafDv/؂ +'^W&Lπ] 5A6-)f΍PM~|OFM.HJBH+^ BUQJ<0 NBA"D^DB? e Z"{|Tl辫 S¼y;@fV-lwW~wz;ac3:l<DŽ)ΓLei23M]O&.D@d`׉Mt-9m6܇1H=Ja愹r=BiZlI٦f̑UôcLE(:"JH:TC%TrvX3ȴ/Zr,TzYdgS\\wz3ҏ(ZbGLD(JG4X]vu.?k/뀽q rqO7Qk,V$yHNINFeNI0\ƉOߋ8!Ƿ\aoنn" E-ie i4 PKLa-ɔ+#=q&H2̄a'T96\'ŴQVIUȁn$iHmb'Sj'rX1roTjCQM&k b!Ug'q&k<@> ZG aaWܷkx ]c'0EM?Mu: \Ȏt6#޵Y|OYƤ fXEH#RfCCZ$\e1(ED+.L(sqdn S÷66=k21ƓGof=\G e:.]]Gp*e~Jowf[Eť 27|:ƀsrCem.ļXdL@Ƅf˄ϒ8L*(v5P1cǔXB -Dr"ٻ޶lW\\Ю} tz0$Ho/Ě%_IN'3~OQCZ")Q7ZHV;UgP]),9^Jjc+̔`)K]6َʬG6[m5 y%/&@DYuWy檫‹fEhK`2(:d!Ӱܮ/{(?_nM'z_OFqtű oQ㞥C^} Op(sj'}5oim"\7r& *$n9>IDXnԠK;/GZ1<}ok!;SQi yEtY[sUZ+uN~M+mK֝D5_PL;zWq>jaWYEtd5_[]I|ИUwmk^,%eUgʹ2Mqɠ,mXz? 5싸{]_c}zoqדr? f -MJi_k+W'Q!wʣ2vfBM)w!reXsmp^Qz,0l׽_X`n1}zbC?sَ ~fsϯ˜ $\U?/n%tF=~UIc_|׀pΞߒw~d:{ַW-ΊP7`r~hǧ\31'ݽ0a9Yg.p\PMuOUäm-/؊.7F3(`yd 9da pd8[cB"'(͞b GI'\ ss(J\#f(W4pn wAâܯ+0KǢ4M?4X_.MS8 HAЯw 1.? S %$! 
k',)%G݁jYugQ$kSmk̈aFa3¥FurQL@3h fT.K^̎DLYGLsv qDXRdH"@hQ`B="}sY)#p 4H}A.%*n-l|_gxW;p-Ų/|,e|]ӒR;4FWFX#Ϡ% u@n4y5 }Mncﯞ#3zv>iprPlq')(r&gi+J[bh3){{$΄ bmNxC 3 6:nxiwGJ`@qa⤍0` s$0"a+Kw6-{$:\%i[Q;dѧIz$ ?m~c(=DpzȽaa N3.n=f .w*Nkjz0MڸZA$z(zkLVPl)-6*(OPW콋&UQ .֝7s~:c܉H,UB"q/+v{y`fOH޺٫<*+ޛ]0J5^] x댋քi3pWڙGb%9i9g#6^|/VTbÆD⒫dL~k->@LR&SZ QTO*\OG?6hd3F=(3ƆjSF$@M./2L+M6TnGtgxR$)e?-(v`G =RL˓ΪNjLVIcvj ;ƨ}0'B!rhCqgOKκq~FS";g)e7E-A.:qZ0<3[cP rq6Q@4B@RfRㅫto6p,ۼ{חh>=٭рGh.VѺ񨮃:G#8Xq+`Ds.V2Sr^놣y(j4E2zzX@XgN&cVzF˼􄦠P̟p&r SPki̥Jm:ba)LГZiQ;Y\F/IcݕOTR\a)w!reXsmpQRðA^ׄl=,N.*JI O iI A)rZSFX#)(@q+>HZ# E]_S|(P 9%z]aApm#q${ q7pT(XXmjU{4f0&-@cծHT泟Ȫ]A'}ֿ:RH7_?|؛_WI798Ś;dc5Ta6&9$@B>x4*\I`&j4v*AѰ1\b΅s I(A:W%ʕOZ֊j f~UrwϤ]5^M^6n=)J>pPQ_p Xz}_ H |LJ&_dgnI!DD7?=FǻsT`\@szX7Q%EǾ 3˧m{'lzy65N=8CV 'V R̨\GO3~mn,+]r^r?فo4o>_V >=fP햂淡z(z?OƘfJwP-Eu֒:=xux3~6UGGa$'M4;Mnh~Gͅ4x̔A rcMV!^}ʿi3ɨLb虆o^߽6a;{| DjT2sDeBLdVz׭VBDSD^_͑4 xOw_Mz1}{oT)Ж~-jAS Z`ya.#;5$Õ13w*`6^m_?X^SҚd%xEy@SnM_:BR7Q׼/hv=%>P^K;czȿo*1=PS qjÑw "gu|w3psG*Hep|uw}MOr7&AߨM^x$d$$}0OUI>tzo9NMDn6 hه'yZ]>]-}~͋ s| Ev~y1; fG8ph2Mu'?=/7k>yI'u37kgRu͚Ҝ`v?Wc FX\]UkLܟYu8!H Z8hl$ߢ~2+t|3!gO{sB;fyZH*jԭ]d+i1c܇u-]Zx?U*da*ZUHf%ZdE~i%eJSsTE}%l BRwWhvrFj|$I1 ̬ p;5dLj 抪߅j)X] A]aRYz2\6]VV^ceKW,f{Nض7fv?VTB,y lR{u&-F% EˌJ i,KՏ%stbZzP\ q[#k,-gt?CXaP1RF+G]Bhft^Rt%`|~@..E-f_qԞ冥i| <#\8yz.&t 0$]E0i덛dlQhC#*[9Eumymkx$G\Ӎ㶀CmDwjBPlB\i5C (ho-q \ MZ1bYbZNu)NVRΆ{ªVro t`PʄOپZ B-۔e1 b ͊Sak5hxPacVZ85ʺ]̈́*R^ UԩVl4V0=gCD̞\:mXpx,/U&kgr[2b#Be2*kl2Vh+x!YXr!=&SS'r6 1;6^%WhIgMg/駓'J&ٓLJ+Peje6>EUWpZMv2@~l8`U(rkM#S0Ô>zaI)U>}0~}`-A]N [TW6:%x7U )I)(DΈXLWN^ |z_F&dRPE(V~| (V.BVփ`+tS ?tG EW zn`S>QlWd' ؆;sKK!IKaG sH/ '@ Loxl{5&|eT'%9Br*ʢ2-+AŲ/ߟ>|OlP0V*B4u'tlG_?*'例x `Y\ y0o_:O婩5klQNg k汄X!G؛ChLɚ ]R%BPJǖUb 2<|J f#ezţq !PIqg΂ġ ڱ 1pb!$qS> x/Ev]+i:pH&8^k!m@ zӻ&F%,+zD-n!Gt[5`1/Q8fZ'-خ$izLarc `d_P[-l;>n Gv_>t j|&S =ӂ2Bvd}"4a/M. F[D4մKeҒBRS^9m¹$ {agd6M,ZНQ{|a?{1& )T%tnFSQڌ.2Iݔ^(Յ(èӬ^3-:(%=' tepNB P<'tBnJLnA`b3zLf{Z[CS N1i}gևJkJli^MWV-+1Uh؃ʑ2fx?>3so z6¨%_T*5flG@A.Kr>TB|Oy4#QRPd_Ʃ\PwlIA .,1;D6)r1 %x^qgS]nE]IR5#Ŕɉ>.k4tJ|8ag QnC^Q7P8eW[mG]OSK(\`Yn"G0hns/uaqz{'E$iz>7?o(n7zy̜yܽ!cˋó`߾<jwN~{&8&.6i?(6&LD&_7q~QǍn4ƒ~ObVWtCpJF N i[Юkw1If0=. t"fTsdiV>Vk!*9׸ukl& 4dR2URdflJUCHMz1}ܦ*%yvqYE-h A -pCjg_ݴ7Õ1w*`v Uv~XRf͘ҿBI~6G+f\MK'Q{]&7e@ݶ{:Bs\\׉TEh_\x8ruA#~Y݂2Iy<>ûӾ~_|=Lރڿ]{ Ƽi7ZJN}DAoް躽I `ttI78gbnn'Q)Y[3lA[] Xe&{~9 ~$1zBo_7q6vli<%^^%}K>ҿ˷%YTz{8=a>Xڍ*\XPLp&^apE9ɅXrZ T k"0'E0-8e,Wf)[* 0-s]Fpv0R^e(=qvlb{/,E&xR&Hl#I0C\f~ℹx3V><]Cdk Q wf҄'`dx!v 1;W ,_8ɐ2Jm4}D BS'P\،Rl#' OjNg;`zYڍtIAۇޥ2T ;/jv-1 J|mf0ɶR\f%=f\\0xDJ'QIt| c0"Ur07B/\g%0'mRX˾}zuadY]it_E \%t'a5 lG/R `y"۷z$j@e(A!1(ggbI*H]G8̭ -VMn Z%5_Q,EAVТC*G٤OFx*B!&> Cl{+80 Ǖ PB/%=r>f̗9@ۭ >E˓0R@~6 "s =#!8!ctt@f\' ~'}J07-""y, Gjn.Q>T\8\j1iS1O@)%]gGT*E̎*A$=%V],z\ ߴ=(sY1ñ RJЁ)0 ԓ6ہC<(8ƥ|+;[+#9bHsc/~ӿgG ;F rۖAزƁle1ߍfv L9 nW$8)nCD.bg%Ek*^ՇV/ a:>}{۸Ev_C0,93d/ [J'S-ERSMRT`.VbWU R#>|W'bQl_"Z3U/۴:H=OEB//Wby𞼿zpQQ@$ُ}`8 8C!5;P|z&> w;ܯi6^V )ߞ}7T#+&(v>_t dE%3"GRYdfjD0R/Ҡ>|)TpX $2@%L! (԰.̨aSڨQ: QzpZ^୻(f;vi]{AG*u˖kl\#8˖Z%Ul(DEX=!p}Os\Q:j)&54 \qHXLrJ֊Ʊ` 紪8pCƽA~D k:ZF{] TP[ w'ni2F#)DluKW_f>:zOaLAf Y!r5lWf˷|}Ί7 e:"-E+D#Qhy#q&uO;7 xI,3T~C]Տz1ѹF[FwFYF +஌)OS~{&Fjd /b}nZ"5ss67-!FD}K&|A/k ͟jUg]k[U > 1wssՁF + Fsh9A6{Ahz܄oJ}0r>{b#k= Hb3w?'ʅŒt ^@g KwS;n% Rm2 8OY$uD"r R. qvY*r)k2%>Ab]3B gtQ!=Lg]v)~E?EQqDY2q:I9PDYf=j.4ͥ0*" |%Li$vWN%q9Zj Y kƯ}Z9^NhSr?NNJ:jl$[~su5wl󀙇rMR`pKoOmK[6m_yy'b1~U* }b"(*J "Z~X7OH~?嬆 f*c*q7 ͗Do7={9S*\*̦X`+U" F'B2DfA:i"bHڜw At{DF=Sbdv93p qz9 K EGI$,_~ ?mDO=Tq96=$pasƜ@:'ةFu\h>&cUݖO kDX8=x<_#kג &z^cFq!s"mzς8X_+x3sLm1eH?~s7{4TytMהъ v**=~~hsmBJ1گv9wQz% [{Hv k*C0\v 6nz{wjWZ)kRdڄ! 
L,xT(]8d:'*˕3$ά.74:5,cBFh%w)$AT*D0e96 q$yJJ]6P$^кh/bZR^h)M(]{4Q*_FJT){WENd)H_&߮~b_9~&KK'ŭd}.b^{y-海eD XHaET VM̙!Z3]WԦ 2"5^s.r cd/?OKߢ 2@*gT㯮z}jo^e_Š_>RѢZxk֥~>~]/e!śtvYw^U|ѓNS>+s cCU |Q'GL7|NU&U MjH!Enu. U6)3`;o x(8 < Ιm3]`s K̢SB:=ܦ2A3 p$;a!SeKB eTB,˜1a 6j|TNu"DVT<:P/KK}Tms_]L}#T;-ſ;ZrC'-ǰ<+gŠL˝4Yu#\nkڦDKrtUUwj xnh^׻ aOr^l! B1 gNҥWydTjɄ2] V,wRI|f,3zԾhwj% C I:+:١u0:H#`u)f0sJ}J//Mqx!rAMѲ;)}U;L؞=򉩠_Mb0H0͗%^u벣Vް5`O[/c`}!{Wy{@rac!'7_lCЮ&-Cޗ82Ji=![~{@ H =>>hYN#nu"32”֍Q[)r_E mǨYy=NKW`jBt~KW_f ; On^Ӄ358~mqذ_]Nͪw߾tV|R7鋿O cjJo`X[QDfуX/>:רtͨt̺+*ܬ De?1F^>{ LU XU lt+F ;a"2hp+8ͣ#<s3ZՋq FO@@Mf ;tFЂ1yҝP8b2 }^ ?:g' z$e\!;5|Y;y[w`z9H{9VMr~xp_Y ^"n̲CXZw>a]A#<7{JH EZ>7+H 3e'h.MKc^3tG)kυy1Y޸+0:lb%щ@Ws"gJt;`A_S5^_KUAގ Y#^o%G ["\=9BPM#T՞FX՞F=]hy+նh:dzl2)Ak>~F(z'M,&˯Wh*2hrXx9MWx1>uLfbrnDd֧$"GL0¨L˴ut4szE}Azwx g# 񯗃OmWwXU48f!63] ?Y=VJ`T {͖whp(1JCc\rBEQT$dT#os,k 5]s)NXHRJdU NZ hLGL"ʼn[< c4ufyOc1 `7x:J+0|3VI-_{~+/"EzŤ'MEq7F^d4^Чx{PڿRem\\+A(!ɝF ,Z(3]n"`S>9zv1i?'1)%QZN3MJ0:D\ Jh`$|+`8OTĨ3VrFPLuΩ8_t q;;u7:O Aq$s)ȈKa3G$VQO!e%E)]R6nz{ܟ\ tLY0$P,K$$P]=T$nTA a1GB;fÎCNdIi`kU ^a 86& *VQmFR~0T" [N0o 1e1xN%F@6%[b~suuHM3 -yff_nH*!捖tFk]gނՂlGt7+}WnD^DY-[>|gl껏?9f=,vaDwILt(FJmocd@k(pt@Eء\v6:'QCK]Uɭ̨0ıR ce9뒔 Ub(Nxq <J>%Φq<0} -*T翗࿁9QYU29SRn=%:T/Jf3"ҔbWK;b VC9!M#|0%ufFHΟX4OK>gzǑ_1rva7oKppb]`{w )*IdӋʭ۲)KHONbG,Gů|gTٕ-G軰'oK)ׯ75!_wI/+\=3=.׷v~ KV,(5TiVv.gb~.6T`Vak_"u._{jn&'VsNH2,a;JfjKT4JZ(pݡPS /K`vqr!96j~MU{Ɔ.M]RmRzhͥbA$d>ףĘm>{0[0~,oP9kUnշJɥ%,ZΓYݭzM遈$&74{{fIo>?i|mzfW3ͯPfв'{|]n*ӎ'_rܶ|:0E0T.-|@R%DZ JCq J*{jy}~sM?8+Zդ%^_¾}my*Vq_QAgʕ;v˪!Z*%fRU}B2,:ZgB ]%cIeAҪ'ɨoV3j*5KFizEx#m8|SAƴ%k_Ny8B ww~HF͵Jlw~HJJrAa 38VYo rpj6@?{srͶy0{w^\~qv01?b0Ǖ}7 ҬUvM瞄1s zbFE`џe UحdTRwŰ"[Œ27t Sн M 0(c )S-D%7"OD\1r6"oD iɱc8Eh/>)~-CU,AwS9їK^GFFﰉ쇋.QlSRy=7 2:Y!)ǵ3p\Xq#3 UIVyxiٞG<|O{;!p(G \$Hο*>P4p2(gBuшpYۥ "s8\}Λ8ᨣ:½UeE܅; ֐s!Aö烐!Qx!2auX߮FrjR]t,ِpe8ʌ;'! 8/紉.<U& 3QT5S߆t AE\;eM3Bɷ IK3)*U|4-SO>fW.gէk#5)E~H$,H?kKS7Ĥi?_ nj"K0$kS/ A8G*3 hꄐ ZiyVt0+ڞpT89d! *(BcWrpmOվlg xu4njQU1Y`y zQN1E\ )21},m,1#솥݀AD6,£7@ I)ō wݯ4viUb! Jk1O$ xc|H7?8K;@Kz?@FT1kJɞb!i,@)Y2 @W'D7`ߋJ\7U9o[zE^ėZT@0&9 kBp8,q;Sna<ϥ_@@Ŭ1^夾M֋NW! K1F8[ȱ *ϙ.)(ԀU==ЩĀST|H48! h`<V͒7X(Uh.;-&jS\b{Svq͜FۡoI)J=!4\v.1{ n[m yVH*:( XG).זB "ҖU>[K 뾹#O+@? 5p8h@Gg/@^Q"[:'貧 wY߰Dkrp+])sj+TtQE *jܴyFx` E_cs_&&I=(Le6; Jeo*QHIgOy#d{FQ-P?5"G> ͭtp{_JvYѓQƤmŘ!%R қZ|qyH%I|l?:!qѕ|<4ac]~|86w~ 6>5M4(YèeNH4,a*bq6sOjtw03W73,f~>ku?luK]gYT 4Q{_Sc檎ō_TkQ: >5;-͝NFsFs%2, 'pÙ{N@ Sb ? ֪$6z 6l:S]DN+i`Oafzk絛q E!3L ;<5Jḏp}MzhwxWug%{Á OvݛOWw!Vfi_oW~0kzZZғN>Ûٟfb׊zxbᓍp"M,M`\'qNRnU>*t}=̛}j t]1FUY'zr^/z>;Kc.kBew ˗ƵuGO13>^ ۝81@RX˔EqCZYOoW*wš'<_Chs$v~}F]2I)V$!`@ QXI DUXiݞEԚJ.cU^2@ .*sb`M3 `Zs<,RKqQ8C<`T:hPg~0 oeEP{Bjiƹ1j0[?10   " *a #'0 VsR}11kC7RuÆ]ߣbD ea\({sjo (1L$LH0 Z`=Xl.jq{ʬ^j.I< \ZhHPẌ́:,2.c Lử ͉H͉7cd|&1)`RyiÔ&jÈ0fXR[YPSͭһF#t/f#zFhP谛F1|VH$u0`upeg`Nk0aT( 5ȡGx,l?ídv^Bp|r8f:}CY8 vB_.~}F+*{p9iEGEw&zpXwH}P*"=\Lʫb,>寿KLNHtLvf'S:PكLך*#wmH.7xw>lVΩ8~IJWkTk )"9Ԍf"mX"1@ P5)W(c QuDHW)"2J4MQwSP/$Nh5nJO 6ߛ نbY=TqQX2ԡ/uPƑ.>?˙Ûz^#jH69I(b&r֑F`PלX*8DP '!^6L73Ԁv^0k@s'+)8@;(%nqP΂3p4M+^y9Iz-w(i*(N5 ?0 \)|8'ooZwgꬶ|4"ZKsdDS´z9qЫbu58^ͧ8" eA]D4ki)vKQĔg+ fec^\YZ<* #gϯpKv4?\ᷴ(yU]}fdg;cg&K~}&pm(;ed@=<=RѸFDKIvf W-D/y[uSAyBM؛B0 `!2\e V(Y(  -cDN=QfC`6UYp^8J6q5 \!9cBSGk7 &˵*XIkڐjT,AڂB'TE[gqGX`8C2Jj:.,AE[ZQk07ToꦞV𡄓h$O]ЈhT3h\%;hɈؤқjF.N1$>RiT∔FMD=ݕܪ pd{0^lYI  ?h dfƉYS .CFn$X=MP%2iPZh >6 mi.6w8n&$- qV))86)!FvYi@naˍ0ZQɍRE=[2Bn7MBz,DKJkHqhK7{t(;k6ڮ9tR`d6^͹P(&,;ˆ0\%rK缱gb4tˡ/rPN/k>ǹ@tE< ˛7ueTɥd(ƛGM蓜=lAw Q]͵O9Ls +"m>4\IggF[7l\RI7 \s}zWW0GcTR0 M>X@Z!IM42 FN,O3]C_C3|0>}/ Z.8 dzN|c\p%F bxIM)AN3+=lgLJe[il$km"9M z- "P~7u#4 <'vBLIBWוjR 'a1u4%C+$j9 =&qO`=o"lmr! 
dj,lwnTeaObk_t t2G2_hRvf4֮ V8]Ds']dOaRE뷜EWf-iې?}oBQ,ݝtucG^L>XG".ٱr94J{afGf[߽pe[Gr"r?zPf}~wsC7NN=dk\Ojk "M_Q2JNzbI!]q'O^$'˪,+_n>9O~oJir~ZdjNS|ieAA>Mu7t-'YS.$$ۜz.9mzr9.tB"ѭ (V^ .ЮG-5kv@P2+SCQYuG9sq)`ݕHݝd{ :\y3^ǫtò#e86vaj}0$BjՎx:gM>Wԛɫ$oc[1u]u']a-ʛ&S}Z5g!hqX!|^`Gv؛C}wteW3?oy\)jE^,\:IW@C ΅d(4gOj a?,87@%C۵J|jC ;ZvzhڬTmsm`H8hAʪgڇ C@9k%_Fwv}ZC*4`5c9LjһHcػ@4p;ڢ~z"r- gg_ gP*]VUY=W9j5gك'sn4IZxPb'Ɉ'иI&wF}+W\/x\?|y9`1+C-Rrcz9}(Oܒ -p{ ս>i}ղWTqZcEì56 `AlػCD>n꺩 bK&w)CDJ?jrWDGvGJ{w8] &;J-ih.Ь??.G=80/>xų,0jGز {27{P2*]GS*LP-M-4nY(R:{Q[PrXZv0m n}аsMv1.a/GOƃ$O=&~ď~k 1Qչl/!:+t^~Έ0UE|FS:*GWE|?˨ KYR˛N~| v,930,>rtOz9Sn)oF+kqunl{Fb9ÙRe)=*}Ɂ i>X~6B1-H:|:t'T S*y @~L$c7E"kRؔ/`VV&#.1lJ%Å/uR&WȈi.1B#Nca#^ܒ4a$I-* SÂQX͕(0fZtp##Ј*шEUNϥ>dgЦ T "7AH@pi .z]6:`ZDU8ƥk<nٍs3 X"j8j7%,$  3+ Ozc5c֎  \He! \2g-.4w̡NQ"jjj`+ڄ7T#FHC#S~ F@/tĈ8{S!&-.y 4&j4BaH]Э7rxx$l~WW%!7WTyj 'y |fq/l=srwwO\s*Q7wߟqYҙkYkD638+f3gBPtLJΎSw~Zz7#S -z|_ 1r_tu(g Ӽhc:Aí^q&{Ͼ9ν2S^:FuUK3T &Ԛ`4 Vi6Q'CAsf"T T4UXQ&{Qx[;Gazi[mոn‚rpԀؼ jY)2[ Gd6'ӛeAΆix ҍ.|4jBr𡏃Tf,˳_gwjMg ęv2[k ~;{~Û.ڎ<Ыyl~0,tOUb` xNs4Küv!=ć8_a# ELIy*CXbPGtڎdu{91Tn_օ|"|byf^} ?#+Z* f{eЩI&AƼkw]w85@{p4ա5{Y{@ }|L{}F6ҰC@͟-}&c\3^X˙x庣DN,8t&,*YB  ڀ\*Fٔ0o0=ׇ>XOkATxtk2m,J('QQV #h]4X~OZxHrF#@e#e:1kƌT6vZ*aGE@>P)o!蒍DS"⟸c1?{Wܸ C/P}tD?x卍v3/v( Unں}@JH@,8#:@ QYYE֙`[k˳_],QF-U,i2bs54c(A b =r<4װ:B"sje4S$#s.UQcősB:ư9=FAQL+#).I8:qZm I?z$ H?hޅ(1dIRN{ U|@Ř{WgpNN9! {)8G{<` pHCN`ݗӈc\3Òz[XfOF}]縗e *2 X p\RGq Ka09 2b8K@0AԢ$juҡ"h+$fZ{ JxU( ?hIn!H&,JaKN`$Os %| ׹T}`R.`BXF ~aQSuHr_!ݨp<+6xNa@hFDNך!Z୽)RDK0J\!SPb%riUԺnJ5b9-5]OOUzR]ٓ/lvi'lAzȂġ~QQvr0"Pev+C~TdԵ5֔#s[뎫;`D1Cq-ƓkNWZ,d:Yz@xiT&kd݃;ނCjIk#n#zp֓U6H1\YYGDzyEFTe@WYZDhUɲ?L+12(ovy'^2M< /.^L61"5czss qA&9plѭl6!soHxGkXQ[={벯_xVV p ^51tW| +@Qr^=}gqEAwqt|KKpT$8gb~FVDadea[YNQz%%3F#%1gVyʈxǫK5 4!qSV IfH%3 ^6$b@7YQ5A/`!}Fwa ޿~L4FT#nV1N@[Y o:4MJ;e|W*F]eӬs;ߝ}T+ՋZ_&oC4nn)e2ZN¼Q@ɫ *ˉ -A9?"5$ewU7>."SRL8븃w1,qԢJJ/eJO;LXǔ|q}[bGɋ|hN~/bj(V4< 6׌v%Qo2> ˢjpPMݱ.J0:LuwR)U:3O1[*u蝾6Vh)GCߋ[<y앥yO ɘ.ӈ,/XF +ly5KɬsNf1e. ꧖"u>"HF8]gJFx0 #e3UIHfe,%O*yF-+IPӪ{̺< Yu/_IEF3_C*ƛlj7)E(7ŅgޖcR) Nj'wC>AnU>sq$C ƺv7moHZ 1'̏{wK!f v~}p2_n[INrXĜ~nOW&I1“J!fW*U k:R8]B˰ ch_pqD^"8QcQmJahèj`tl;\Ul{WݾV\[=eTI;>wϻ/?0K,Xhz5/:m.>RVjګYrdE Eg4ė`_lQ!u oi?]ۊ*q{ـ=x3c@ߒIߝg:; mL5?9&OP*ž2T=׍ ʞ˓8h3Fc #$O:Rh6o2)d%z/%}jKji_6}VLO~9ym9jeht7(AP{0 hJt' #YK `qd /!+>H(68.K)iQ|iR '℀߃5BX"QІc"`|B%X~d߆d]OCR<(<$,}ԭ[3՜ nT'kT.c (ܠA]9]Dސ^$^9VnxƘ!(;"95F9C VmW{ړʤ9;`8ܻjw\b5;qS}3[ @cw4Oa߸tp,TBo/HeaK##d<{T:娄xI=MkA&2-i!-LW$mq !2_t͸\]Ow瓤W'[O>nf]vpD@۷u]S$O.u٠olUFR)wmo1Y3`4V~hRw2ٍٛ \®ܛ^/M s ? Ҹq7se ?1`]"K 7w~^#Z$LSvT2M.a[܂Ĺ%#wIt;f'̿o-g)JaK_TX9__7E4E0OoX7-uK FtRƺo x*qn;ԑ֭ y"$S\*"tØ^sC' BՒWvȔ8" W4A 2={o 9%/cAPCן _kdIL+s|fvrܓ/fa?(8 5,y=_p?f/|]4%4#MHy8t^=Ҵ̣By S5s2W) a^ _}>XFB^iԂyCz=Ya#eeH?1Ė0ya+#Jow~")`n6sSL qŸ{T .(i ՃURE!jJGTjm:mّ7WfiNʄ FH Wymc*/NGrQQyي= aR1haeHo -9=Y%M\N#0\16iT]wc4byF#v y\cbmmY*{[ -VsP KJ|l֥@䤓WQP<I~g Е" ssK\y¹XFi+a h Α Ts/oQIK)ABsOamɹ@*k$ ! dsxc'pn=(; KγiZi0il:V?Jsc, ,`T!*:W%C;% @ ՜Si:I]ѽwg;^%ewRA(sBQ;`O=/BW #vZR";۠:WoR1)M}Fse[O+JO"һ.I2%mu1XT bD'm>YJ*gnt#[EtMpLSٔμs" oLg7C8p/~Lƭ9y^]LWG w!Wg<ɮ_>.!9C<J9d=M [yhQ!1B?YoN̢iC`Ѓވ9Q[yتQ8 72vBx ,439@AraQhSn1(Zi*ۛ Wb_CaCr4ޯeE?,wxCK*d=}3r' -`Ory<{!=ET"=f6_ۋ A`MuW3f~E6?^]ږ9׀'oRR%[7ñx* ,JTP1rJ }fauš2X pQhr 6ٻ6r$r]Ge I[L~l]qa#%n=Z"RA0qVbfL@QPL Ҧ6 T~a 0'.5 㞗b0RW8 窹sCw)$YFI;@ke:5V[0Y+JHE$ǘe5X\cMwZ`f9@vn_*ʭ+_aI,qsjw\'; rݷn) Aȵj}blǠVñbKԾY ޗE"j 04.OϮ)aRjnom:7]]p,Um;2#;P^_BZ$:**q"A .`"-Y)&r]$Ah8?OUѺH9T* B#` kյu6cZ"vXrr(rc`nVQ Ү)Yΐȭb(#nu2Nr#ui'vY52 Cu@h!mmi^KY@] ,+WUkgtij9$bi[lt0 , ,.ݍI>[C$K7̱<'nv R}_盥Z"~ V s'q4x e,5H@^PSphH[5vMX̛啃ƴiLB'1Ǡ/ $Uc r wʂ%[k_5KY5%/xT. a,xUQV a (*BI ׮R%ߞ?Yw!X. 
୞h^GY f4 `!0TRsn)"u{"^Ap0hvVa}tةo+noNou:BY-3"*&lnmb.E#5 чU NCq (:*n%X(-ŭgM_ J|:P5~3 *JOkAf)XeOD%I\֧ZaNDG\屦"xd?Eӗm0.U&`"}tf#_x1%w^ݚ'R%}XW`>\Z{D+Rb2}8i$Gz֔ &֜vqHH7]FqNT\ů֬rwށ812:3`Ö2O(=0݀q)JN-m.g~/O|nUUAZ /+w u8;?fH nafw4?(Z%mM':Rj +q9{Fr#uuAi]3^ߊ H(ԎNy~^@hթyەx_ׄ5,'g|cg,:l٤$v?_X%7[(.rt"s `.ILš9W*ATv{q%WPw\c  ~D?@,%Ę7DwcEc/ew؇|$PfutK^W,t}D/J f%cj^cԠ?#${XJfFA2Z_pnNPk_@Z}j 30oLEM&Xs|icz Z7e B]bҾCa`~%Ea{V8LXXo (l& &0ۻ|_ŶAD-a뻻@#0;*9xyҟT;jfq|]槩I]~pѾ~pN^2vѓgO|:C=O#ĉ(Y& ttc~%{*ӊUurٳh%:If SX1\T?ӕS+NWN~#".D6,s!ըHsͥIDpf2Jh &)s~1hIAc/!tdN]ܘdI=x}&1!Sr)U!j !D.t:{-:S7g{m,[%93b8 ®u79Fc1gb|o2g'Ʌ@e]`1eGfhB7SƪrrvR0jπQ*%o#xIn;J㻽B j]{wCVb5ȞWzBi esLcP ҂p]Lo猭{Qy=IH%V~ V~;g~2jQPu;F5BI1vv1ctZҍ[T>E}09xޒӠD(CK:6Qג1b*&uܒ1hKԹ q9eY+9wSR 2YT^dHSl9'Q9Ise8⸇ ,Jbz*ch]g*!`0QH%4Á@j4W&M4iġva@seC0;ֳ,8 *I!S" 0 cҵfp `=9Έ6XKN!ReW:L2Y82nQ23:0׾/[T LK= ^!B{ E!7.Q2U0Tk0[,>;Fuqu+ٙuukBB޸nɔ)1nS,|š,|7TeO(Iŷ "2@4#BW҈ɰ}16y+ v5XSJIAؿc@6Zaj׏ڕ8 TO] TLJ+זNZpϙ..S<叫#<.Yt??=]RauȗϿ/??+=J*drlW`;U!+ Iܟ\ϟά k8Z3~B?LHw>G+E4~{tO0VDQBx-gb+䄓maQ¢H`Uq!\њiw³ m7ERd$O8.G]6RKRS~D3BTo׵귬]/Hn/*a&{LA_r <`͊L.Hص4ȝҫ87\sFSbM4Q&Ham.A8ŒxLcCq.3H*r ʮ˹}]NFKZwrN{sw8?K=՗ZTJ8(' 0cCs@q_׉s 8CNesa3zYPs\r)UR&S!sc7CA, )',$ ~,t uEs<Ý݌=+̋s&Ks -׵I!LsSBecVxnPJ\k:e)pXQ%EPqYhpMXT;1>NFc@kj15E,LfW/]Qvk5X\cMnYyzSr֬ d$"95:ͬ`HC3ȁ).l%o*P-KchBtlroaisqdcsmk>!*m=ۏm/WXgEC&T u,4;woNi閹UpJMJV8ڃ8! }À 1dO!d2mAKd2Mf2´V()s$y&(INb%ڗ+?uȚա8^NJ)T1yځq+oUyap|  ]/eFC 0<!% _=@<'OƂ 0񩸕 ~j  ݶPO JF͛ GoHM8׉GvbyAC0%c췧_w^xW1he{mFšz!ӌw*JRi3@l^oPwz i^s'moL֬/!KuzckC/?bČG:ք]ueq*%Ey&:1rH{Ia;g~2J h<ǩя;Fԏ(HLaX,lQ,lQ Rk jp(x6@1?FWZ=a5ث]CZtwmX=tkMf'$i" *&-8+ ,cR*\,K cW0#9s)GYxLP)(%?c;aH Ȋ ' 'v5psIOBjw¼8lv)C()~xMU&`E:u[`~Q+VFY ԲTea[sX~>}vi[k.S$&]6dz=(u~qPwMQLPtHQ1 };H8Q."'u;_'K{/e9N5'MOCRc$94+yֿLEAX9@*0xTr&e15z\2"sKã /U̝\$4Fh>%DdyGXM>M.z2گ|\b$agH$(N $  p&aVeZ\ %/jx:xA]x}4ͧǯ^㴘":4VuQ:{lbZ2P[RM͒POaz^VIڙ0,ZqQaI8B5$6.o&a+,˓\zAOl.Tu"v; ,0V13]r<[QQ!(Ë6>yE.  d*uWTp5-\Ga?{ƍ0/srze?,"8 %ƀ͋GiVli֝-vZ#ZWbY&2 Etk6 JY)#-L4Ӝl< (h@g.c^]I$pms>l2xʴGƌ:s!q*3"BJwXYۡ+Y[1clⅧ_n/ǽ/1}I@@rUQ&o8rC$S6~$7$] jڹirzŖ-c2cR8A3צ.o dȖn.%#ʦE/K#0 1F308 p#Rv- =F@gɵtŏ (W=qFQCF%cE$FbGI_ J9>MvL%' ]b8!BON8ghC*jmC'#\bQ}W;\Y`1WAKzʜK4B9giq*AyBP-w``ŅURN^avZE! T oѨ/O2MpB }Bf4k$׈9C4DK0 !QV1jC[<ﵑT3,@:vLgUx K19҄{m0-gSJ( &V8rZiCsksFs"R(NP{-/h-HzCzs #FӖ\Sts& %"r"pÖ'}+ǻPmMP]dǟffyz$?Z^B=ߢ۷/_憼5|Sxȳf@o@(BOoz?~`~|}YpS)g&κ~g4C?DgF6 o^ ׀ YMqR?% $*oiDg$2DlDw[dd"e*J\Yd8nz7VEG sf2 d[D*WpqsRg4$ DKzlM B"ˇaHgM[FŚY$>'=adOBx8c/#)@pFfȉX-6~6yY!2c@ RAN)Ի\Sg&CsW\I[cj:i e W SC~2y4[pVqЛsAiwt BGUA`wU^T8K8_n+hVJ: iϜՒ׾Z268hMyxo`rz7qSPHMA?@5|JnD՟(}>N7!SxS\#f1O!Sʥ&#0kx8!XwPₜ-Ռ;(dJӌ02o{L2ÉW3c Jw1xwXҀq.l:YuSG`cx/N j'q4tJEqbĹlR= @q"-9@Λ_e".JW^ބtLЯ{㔢\0}9 r*G"sM2߿d\{)!Ά?G¹ 0;i?tt_gG;dS+OtK5 uH(5AhYwϬB*c|С#ᓶ4o< O)Ϩ~Q Ϥ2S Z)ڬޗ=l/KZ7nh( P1FsqvLjfN齧 )4+b.tCx%gjJ=}H"+eCAn*z Ql^odyC;ʚQUKAA^*{W%)],j BhVUB֙j_ U ~ԱR_TvZGL4UkLu+!l+Opp//~%ڃ:!nf/^^/٢Gybqy˙GB<ӓ U Ht_ B`|Ig̎鶃_W\ ^dC[F?ti=Y?o_ b\ _?y`?f}X95I~ wAޕ%p=23ɀCb,RA1@l 9sj+@0X07௛x<*꣘43zׯPyۤ7p&yXB׋.^^e5rJLzT HbZsL!ޗBG5: a؆3B+ #\cV`m}\(Rm = NTjR[ LQݦq#b>0TTJVS7 1 #0dfHtXI$o S-!F-Zw,$\(PQuoרf|F s.eS?DfzayDZ犨-G= S w2_o46ht?i=@g!l d ~Y TcL) /7#TOt]9a Svg™B0W 狥TRad&J~+NNMr"$S#F&ai;vK FtRǨZ$JXk-inuHș2E,$4\zwNS OZfA wpQgL\ܿzI_ Яu-j6Qy @% zKs/%d c2rfq@#X _ORFjtZ )ٻ-b90fg_àV.%o DAK% `2Fw!cխi'fqi1ǥE=-.m1#ҪqȎqknxlֹ("8Ϋ9BHm1R@r:Na0ioYʕ`A%.@@dC;~DrFWk$% 5ь˴ A{c)en 8Z3(7"KG8j3*ymB2flz[o4/^Cɡ4Í4ӭ3p3uHkIDuVܪw}=:\#Y )ՒڪB  FwgO9-y&rV1i[="kic'x{ 7bB[k}7FukXs8B2f I #"&Q!,@#Lr[Y%p(ȅ4S !FB \:V'8! 
fƊJ&9X1ITZÉ΁19X-Uͥy)e0ᗚR%69`H)Aw4Ez&Lt!661ƶxڊM?lCB\Ddteo]R\[*1:FvDaek-yڭ 9sM)):o'i7BQ'[*1:FvDִ[Dc[r"$Suޟ^n-Iv;i[Dc[r"zLydKpp [:VI]daJmƖ۷MtJNW@0ߥM_/7K-YCeۊI(`ôD4{&QOOÇUˤo`A)X,( }EI N#˜&.X*t,vMMS RQsXAhVȌՠE)>9t.jeRʥӉapșV';M3ʗ3#|\* )3 we&z3K3:c\22+k:io'؂VYI$ ߕ]YT.JcVN0zoBlݓ9TF'xp-z$ `JaJ_+Nk^ BJq} gieEurO <A2i֌*qvSQ2BR 1+J54<_ &(1Aci+QoY)^== E:8=īDVﯻHɡI qo/Y9hUa``&T@$%vBq/&!xWgXl ֋^ژhU}G¦Y߁:n8{ܣ[VRFk%4ϏK\P.UՋ AcP!B5zƾPsR"e݆6I*uvbԺ(yԿ>ULUO߬@]}bްo|xq m#["B9´_ G|))!zSA=N[Uϊ~y k7W(B&O>MhW^ z.ջ^w#/mjUC_CA5ʄ#zͧ1dc^~ y_ wr؊ X^@7Y'd%0Ͳ&v5o_):/rA!^Li驌3E '~ Όq*/K\nH.R'FPkX*XS#)RkC-υvf(|5gl~~ Vp%.1!v֋. 0SI oCl3=3Sks ˗^kI)Xz [t+ R)=xt짾&^2f {_+mHEbw`L,3ncBm2%ӳHRG,Y̬bFѢ_S#./At~LI ˗O;&H!-'ӓKswǞ"^7WTM@wuX+6^ww𪴬%Z+:.o_nK| r,RgTJ89(1`Oq&%#"#Zɹ6zpE9 K"u%:(3 HKbDD `c^v1wX'1Zh6ZKyPh1pT,vgz2PCRbRAۣÜZ] ]CKe8Ph5YIt!2iF1Ȝ'*2 @BGfpp #@ Xlcɏϋ.j>Ob+O`x9wk\|WBXQT3 _{ˏcY}bUJ=]cFXWDUcq.5^Zgb f<夗4% zw?J&g~;u?: QH"5Q0pӍ mUf͛o&gI-ӏ4J=:!NbEC S5:ZBR|a]e[Mq_fzFz G_~_Oz0G6qrFsD,rrJ2Bs6t>NvɎN@%0"ZYQA(\= 5e؈i[RɗbuOCY<< GTO%ĩ~ē!SF%!XXctGfa|(nRX0FAȌw`NUC,"Ӕo-NtX?*]ZueQ#'Hy{LhDs@W:^tWJ40I%Le:9}&ft$CWqkutRJ+H=H5 M>PMsZ>QH_F䈡 3vWѱG3RI)88Τ`(=-!Dְ\D,_rҕPRmÁx{#b"llK挼PH LeQ0+ &xTUJq,XajC3ZxjBJR/B$G+ct"Q$5xt OpfO.`C죢LIbJ H3yҬFbBd)Hvd)Hvsq&Y8UF;@:'-a5G"{ FJ(.zݞt\$yz0I>W5'+Rp0vVDr,GbS3e-N**ce|.C>%rvTciZ)LY?HIzc~Pj(9DRKRpl!hDV k"Ցqi7(" vbYlBL5D0(#\r(3 "E::(*aG8`>\3'fQR&#lu!IUȒBL[WJl Z+65)"ؙ{B DyEE*)|lX4uul2C1ɁkW ` Rϰ$ '0Fb&M@HiV>}IsQ&d7V0SZ4E!lz.˼.[ o;MZ|'K"l:Sj.EF, /Pz) erL~"g.VHh: Қ{uĈ Fr JAua Y꼔t3 jwGR!]_ӳbU |Bi\j;}p$XRJ9>2+yaĠj\$s$|Q#K$`UZD84/N1%Cu;Y.JSoZd`δ9{O |>~ᗞ\\/Khn%L\"h%VDhwh>SòtJZ-wo'~Gj>Z]/.M6Õ)B[Ewwbnq9'bMha>Q:y%Wl/\U_rpZ'пMDb<\xw ɻw )v'8˃A\c=sZ@sQAOY'0-uXQA)QDZ[]zYw5m,C!amN:>_ki}a2sF,}rq ءrޝ^م;OqZBf~O$Ǐ?|@]|r" W+zZ=u#h!aFz ZME__ 74000A.]We5۠#n]xTEҭ8ߎm?KY'l= H,i)$jݗһcCY^ݦA QUZzbӲTuAjݔܲE..Uq\Ї>%V3sYnIԪn׵tuA o0Hj&Ҋ"nfTuPHOQHޗUӸu%xm i+mZ{&t { q.MZD*ekj Uu;x%Sق5ΛFPEP WQ8E|nyj7DJ%%vz֨}=^ GǔNt>\}+>~!tg7W'76%6/~6]rl'CIԋλ4#iWHn1dh..SRޝn],%1p7ϰ\E %b鴝:'~Rr4L\OHFrD W p8rN5S.GssEQB_pς^Bʻ7΢m@SFnx2[ y,3O3 Ֆvb`?EXlvs _~qq*sHc&k pK7Di_jw^l6௶vj'/qȢ†1O#Į%#(JɢՈP- ҚS\ 2";tZF۷BQskj^ji"d|2 V l@`WP'j ԏ B M޼J$B[y mmE v·n꡴V~ix}ᶻNQ$TzܿauP}â(^/G{v=[WD yK3,k[nFlIep&{&6f&ᶽ/Pc겢cc}HJ*b]XvDV%I$D0mf!arHn0UQl^2Um=ǓɫVL 7#G񚖬e.Ӓ#d( ϔ6s,kB*_)[<KI?Ę GE!$M`N1%"FA9]`-Ε`-5W'} \&HG⎧X1e F{ Q:$ZAOgE֞0u8ұ5W9h5! eGBJNbڠx:_ٰ@Kr9 b Z; "u]T2D[J42ޛB*X'$6ZyT,qXj\[iiE0[/! %MZ M8*w=?9{twgq~xY9._9[d#j% 2R)k܋fБEȍܡB 7A`o5Ű=g6OoE}pyQl>loqimІch clX;ki>%Plъ%y1Z\v`+~Soii6?MyFCM8yM Lt,ۺܑ $ s AO%RtOH|^Lwʞ^@fHi5C4ǀNʸ&1IO6U'XgjP6hPKT= u1-3u>\[}c|! 4F8aCGxu t{t[W^S:6.A {:ܵp˲iY|e8_ɞMcc˅Rhf5eCkd5g Ze:]7󢾥?PArFM]`z"AZ*a;;6}gvfx .u 3>;x ^X/.G{WU`&B=-Z7`l3yhO SlP6vЮdVH\J5s'| & rRvxIno"l )INQ$F~F"~Z!U'i嚪!BkWD"A"mـj!^ST3DlPՀϿ>X0 .)E,#gL5:;bvTnd^~G{ͯ|MYWOI'Ik`*'zazYu,5٬J͋EC=E '"?;Ar+,GԨsZ ? pMl?&`'Q!Q؊p-Ú{i9,4Qwk(18@e`r( ^ kطSp9nBoPbbp4%&˅j:t:-U.$˅f7(UНH$.:jѨ>FZ\i-ZNl ZZ*A__5ruۚfm39ZǕB`F&gQ "zT xXsиzL vOKa|/nd2WmL4坣yCT殼~SyK sn!S|r'=+t^RO*D]O~$X!td{nynuB d-g 9d>ϳqKf]D9"o vG 1Ca7ABB?@ނX66 ִ3Y {BB >S__xr!2N6ı`DZmFiM26ƮiL@WhOO󓲔F3d;ve;2.n' hE- [c.$ =B@ LKimH4<7 r[8O+d FEyGb\ ͪ*,J$YJ1,OtO~W7Y궯]m$u%k5ɚu,pՁ|@Z"/e:您Qrh1ޕ5FZvuF◨9#Q,&!O<<#Koi"Q-h]Z331n/&>^/h{~O#\eQxVv3?ƹ8jOǧߟ !GNMg@z Z1=ό㇓Q1/3/fw~6/] )O '97TwkVO0!.Y`5=غXG3֐#ɑﺇ3E٘ ﷤{Df#-0Dk}E GʫT{k$8,J'ސg 47D&|ni 1aᖼ*χSY~Z*&Rδ`{kfui J/?䌿9ŕvUPqL?wO^m_v5oq_Dr rdQWj:NǗWPAJ3ijFжe#. 
OU"h%onw?"~iPf9`S γ㑟~%M^g/➜lZDliTb?IS}q~m?61f/bنw<5EjO+ݐR5ԓ&QG*z=k3%;Z^aH`1z$bdIk@v쨝eQ-S0N̅_̉}.#\q$sbA q~lnr{ )2@Y%$Vƾ*s*$,p+!!+,GWYf Y{Dgr>fY3p4\"bT `ҹ>6 <ׂjY D1iIoA<@"Y!V;tZox 7KsDY@ark-R.w+?wf|Zk;Bه슎i^ 0ph=v0g-; sķ498̇229?=2>y:tx@ףBȝ]텛k&8;/Mf})Nَ EC;mD@%<x p=*˵vr'E.iP1vQmQ(xo `$)-ز5)ڶn"ci0m=f? C9= 'l c2seMX6>0c3ٽ?&Jw;$bַ?z,<1c"BٺaZ;]Ilx*-Z' 1Ԭ+sT\X5Dxwhh8zqRqwّ SW0NrOs)qx9~CK~͆"i#fbo(FF[O{{GC%rl7?=VZF؝1Rpvڻ A^X*{v`=Yo0fq{:4=IuW_k 59o}@4@L _ޒTNx /[պT(STj<3:억 LS2E^S+/p_x2w=_. OJefYRd?&[~hZ|J)RPj1*C"Sb~0%(t&R[mҍSNLy1<XKyOBy& g\FVhW0'c 5aI |։Bz/eJVd.%^8ˀ*RЬZ̹4cNi߀#ño${DZ%ÜgrHMV~~SZ{s0/,lT;դXJ[ɓ HDJPKTЀ B;Y?X  Ty>y2Tlw4tZ }};KOvS@ <>Ă,E.J#8s_zՑ˻f˄ARm0Ͽxፀ!'oߗkW3t~.>`tV"yCiAUs(U j FEҥeh}O![g4&(-~m.=8VIpk^Q>^y{+x5ݯ&,//dxK+lqs7@2~BɶW&*3UuܸCp˽&KIlG3 tA3@~8Fֻʜ@zv?]G~Fa\wRA,- Սb{gp{z33 @%&⃡ஙcȐEy_aW7HlMS4- k+Huiu^GG{.%cd#͠%q0i:]P1d6D/dkX7,9|m|o|=BpR&'YI.e\TVƒ(0s"YZP2q))rKyNEB -d!3)ōwht/%T@tJ,ת(LGFIB͔l7vPC)hj׆jS<ܐ#/!+= cj蚠%A_lgWEI)!&hj63claq: R<J P/"eT&)oS4k3~;;q5diԚse~?`dRFx O 䨘x58ړg.ެZrڶKLj4Y:mspP-6cYĝm6is ,>i Ȣ0C[3oʀdcƭ'unEqf3Đ[EE\1QW p+" 6ԖQXodi+wS ü>OpsfH&"xA|I[FUS_UUo&ؽu;kiloom_aȁR|Pg^L01gY4қ@;S  _Qɮوx@[u$R]#KsN8N1u$2He˲D;Ř=sҠBfB*T:G~$ ZX(/;ŴU-kRx$&-Va"т1.N60:@wR'=JHNV?h ` |5(S;o%9r<+BUnB2nFZ!8w5;|Wd}iF%WsWcZێud/7Ac&x[ű6)eAu,$T2ͫ`-PUzytw`u-OA QZմsg# קUn8 )X=/Vt+u.W#nN:kHۖh~zWe("c$VvY95IՕTq1B'3F)~6n L5IB2GajH(\7) }'. i-uzi"M㴡/Q`3s03y6(rHܞݕǺ+5:h yH 5C:vPˢY߬8B}l^QClzbkBnId B !24SGIQ m]Ŝi ~(T\L))6nM$Pr%ɷz05<䶝-`t= %;_w]ދ,fvIR46RQaPlw&-uNMnZ)% n0qnP @>,(]|oCp-fZ+uX>4Y0&b5{8rqvkk[NlCHT_TUvF ]Y0pA*WzJaI'At<"ߔ Vu1Xx|HTC.فO-8vy׉+JGv }Y ϻ6Qn+{LBdT#Yny(7@׽#S1y"r23]nyk4V tZ 8 2jDƙG9rN7LjsxƄe2d9MåHcɵ1 e 0}ǽ %%t٢u|n&o@|߂w%d5b ,ȇZiٚR$MAQ_8V Pdj+EU*25/J) s[-=3{~$oljD$[Ll߂v]:(խK@gu)wEBo , MµNDkƤf* cʲ1NC6b>1p&N!5qNS㐂vbFBPw;'Ue՛ЅƕmAPng;Qۼ u uwCFujcܸ4Ւ,EܒűDz}g'Duyl65Ng#MŅ>ah+?B)ᬜq₆Nkt( !bFC&gE&*O󲉌.;&?-\,lr%m y J& Nn♂q r ,HR2Ky*HCl%ϛmi~/# \MJàHdS(cJ <Σ,!fz6t |jvQ_ol WӐq:܇,p;Oy,xQ3ǩC#~6l<;9NdBʹ:BR.[(%!PSM k |4T$,[^`"]U9/&3P :*bW.ۯ|2?Kٮj)77"\H~8^hDThrR#d6gGȊYS9˴!(swR F3(gr%r6ױ'U(lm/ӄcv- >ۋ0 '共:B߳C" PJpz'p>wem$IzmR0'3ka= !ODiHnn,RR,JYE[a(VE~WFfZ>2lΈ{;=\Ӿ?Zw&?Qjaք^9yo>sp>ah''/тb,QMjPq9@b7&!OO*5F:̃1,-oxKZj,x\!c臒7u+vbK%8 !c;1Ms&.L}}8Dp*~dW&uŊW%B XW'Et0Ǻ2"0=?Tډ,Z\ ^==8&Rq^)o$JN9yB+ h5966d~ sޝg9yAIi,-e4J!B{KFy[m'eG[QD % ^|7Y[l MSDt[KLbq0:4F;k}K핓! HĨ0)i5AV[^//#sJlb\R QTDaFyFpH]ĺbBi2bծurNx09ZdԨ0QJ QBJe6gRhQ2Q4FE@334H3$$ @1A,LGR T,n^sYO7YI6&aͰ ~bجBf>Rs`h#UKOd@X\ޠP_>EߘoP8 C~\n_zUiWonqN 0?~IyqP|&coDqO_ޞ;/VbH:?o;BMk3_y&vOVHb~=9&rڑQNF_]}̾#7`XHeJ:MNt.Q0mZsME\c@j)VJ$0I k@C1'tx<%9ޏ\.YOfc0ږY2הD4Gm0fnP]cqQJ`e%W?$Qt? {?[ތ@ O6p™8S..nva^O hX\9d4B`XOym쩩 K8JhxbL2(L" K|Dj=U1.@^FF653b.C T:*y }JVN2m ZȨ.p@5z B*zIe@0" MJK.ǚ(=Lja&:ʰkr! u%BRBÉK\AUpT[w}zC6ƺ5U1G֡ti(]^;O{gceȤ!XTxi0HzMdpF&m Ne_b mE AHvMSEQW a&n'OHY^`%3NyJ"& W9%PdoML{`DdX,%Y䑇rqi%[|բ9$_8s]MJ3Aۄr~YquOFb/.k-9@xƠ!W3dICPҡ~w_gؽsHk^ր=%ٻۯ`ͥ@x~ nəb6L/̻чt󱫧>]_Nˏv ?Fei<պkwXMs}D]B@_+n"Vm*U6 `FRlZBzh)[V-]ڏ:w0)ƩPsnO5c⮽=CLݖßsVnSa`ٰY=ôK-fP&ۂ\ $&|J6T,uu}L\dWo'.jK= gg7bDV*X&e1K;͛l޷^ԸQ@Up}qa1bѹY51QXXǢhf,Q"jN!eecxLıFb8  ! ,Rk aAb r!`Z`{{Q5|3𯍞X"_nL  `24̉PArg7 r,4ړi@t) J!QRV±\]|7O!Xu/h[F0>^MCjړ|Toc~r66?3w9+\䬜ί'.<[`h0%-Ej x^f۔~?-Oތ,Aч\ܱW~~3hv1.@]X' ތ)փ]|k$`sJ~hH ."ȋA^"Eb6-dnLR}Q -Y)GntV;t$`'쇖ԇubU o%;\lцL` %:j cWNcXGhchTbUcrTZZo~tpc*)(trF֠*:j|~2{Rd{~r&tRN :Wɱ'*W\/uIf]M]uHR[\i{Z%UwvK:JDo H,wrJK&&˫W'UMNκ8Œ\(W=]gd -95bdH(8Q쵸 Z8Ous{. 
)‚sK̴}hBؑ"O7f1?p,rc{})\…Ɗuٍo|Xm$99qZJZ۞Z-h Žv׳C,wƯmLgN4q龃Ve|~.3D9cJgB\*Qet{%P .&;uDj&cm+Feu=agU#\bȺxWmu-Nc續[O>+X'MǦ$$ªThaS?[,RS1[?wy&~*$E'h^LEɪS{ _'6~ ft93.12gI<@2­u!*4LɭKya9;3xX &2G(XjnYMYtۧbh4FjG]e\DfLn{ueym4VwY@7uMSp$9uU^VD: Jkʥ?w*¤bc^ee}2@ee- `9\nbawJH2IY Ɣ+f~#ЈU/D!~<[Dh⹂Y KXee$QB{HS㔍N8h2*IxGTs#Nutf"0L>:BF"h !2#A+a%<[a0n@B=ql$wg%.*b=hR\>.s`xhH:T50 ʅZ*$|MwmDIޖ뽨Zit s=nVgLXey$\ v'ऊU /%Z)Xm#kRZҹXvT8pjGu)[f`g2 L J+J^~}GN(g|[6O֝]kCm+%U=[Y;RJeլGg.JHzǠCN*M1e4B".HXF"Gt\?*5WXEȏH&F'X[՜!斸8'hn }GRnhY@AwToϚE\osE>(P̫Ȭ͇?_W盹1!^O,h?n3oxjGD~bؑ,~f㜘9#Dhn sƌ+E\Ȁs#\Ak8$> Jw11h Vk2 #R!}bԩ[մBUElxGcad効PJT:*9CdYؔٔO ߧg@7) )Sa@CX_.ϒ9p6_+btN=%Xk'Kڷ]m@}@P YRzrQYtVQ K5?Owm UUc,-[iV+ۤB\ⰾEuwvoĚa hkR9ecQc/^?7Ik{}~MOF |m=n.r8&w}hb9ߪ5˟+)/_joaӳx Sto?/]t0FzSַ֟_ 1ºS\2ԳKpI* Ųb9΂b`MzCėG!˺8`}mF)9l0(s_rcA ;I I'NY; ۹v1/bRvqmBޒj0 XV!"onw ͅגgG_ ,ͳ/P0Hٗv r̘ i[xKє6zdLa2AY#X$A zlON?{EGYI 0tWLTt'-2ιPFѧjv=}5 ] aRy/aXALˀ8`BL[(L+noe@Kb,4u'YnF8΅d s-s xdnIJ'&{,逴w;b"|FiY쑋`͇Z@d8Ax̏&*h⺵u?3K:y ;-wbIpGZ*֝"*S=RtbJ|iWnQ5౺U'O@`2Fn^ShsPFYٹ$m(TyJ &Q3Z)`^nm`4%m_meZ*(X]\W'6YR (KYPt΢sE5s<\G/Y;uos4xs<<= 8wGl {R&;R۾JM +b> D)(MMU9c| y1v{Ndd^瀚@>|,oAI;}C%ŵ|bR8_3׾x :C1~N]Fj{Vuy 0R)F_Йs&V0YKd9wh" kSx.\M s"\ oGJT`ZC jhp6 ω&JEk& &X Jc>{|YE> .AD"7gxX6_C_x^!WεYp~v77p}2pmDD8DIi BalŬܧoAq=9v_YoW)8^~Eu"-:5su&},r6D˳ŝK/nFʇ짛n҉AL[~>^dnU"|Z8xJ?e_->FݬzIGsw=arqan.>.4">swV bPFq]Q"Lk&yei.~-3 \;v-p"Tq Ԉ1oТAe PcHFI̞^$-h [ (V ,1!#1" A JPҩtT #-IA bg:E#GEGhYV{&:JF(` 6JA\ :T]WUr._ϩg<,ܣw3NV3DH޲ޞ"TfaTL4=6GU KSCPqq@ㇾlՃ;p x @im;6_\ ͌my3i3QyϺܩT))n䠼_qcز^L̃!Mr@6)&&看sk3JM1[xT`!P-VTꃹ3[:$UB`@3lB)ŧXE %Ơ>"Ј4-a"`wZG$[uds%P'1QЀ|pa֊E.HFr,2=_hwoV&ۯ1)\QZ}+o[<ǫt:SBjN)FyFpH]ĺ\~Jf;Yȉ ͘3u}^Vˣvm }rWsg ~>{+P[?Vot}S߻ts-Wi>SXkTFo2CGW,@WGuYWh?U͕u+SC*ڡNIthuFI$%,:0RisKjEIW$}˳ ,,I[+0*0ϝk ף;#ջNdTg3tQ^Oys*0Tgـq #)&ڪ8HtK6"%$뚥ZISXOEN06ԈxEהb+8/pU~ ܮ2sPy԰'ͧI偆gL tLY!Č4?zAܛc(~ܻ]t "PdM _%*mT!BŞ,%&I'AW*CSI<HpnO&Xs-F)y!­597mƼH?k'#-& Z&R g h} |'ܗpzmgdoZ|3 Y'BPSFנxUtQ:*OҊ>6@+ 4E֯<EwosՓsF;^/ =F_{-ﭣE]jZDpFTlaE<[<1S/Z3k2,Lv5ԡsZ>ۓr$lO%]&`Ι,8j9QZKxd[c1U 5 gG c|@Qc%b 3MLJBUa |Ɨ4rРתrRZ0+GEÌ;t/DrN W^Pa2 oq FPbhǂZ1X(i BK0]Rhc9[WURqe8y3Pk1:0L"A{Mkz~\uDbF.+s+a\at dFGLXauA ܔ4a uATp53.R\@;gb8颇 bИRĥZ@w ,%y1A:$7bֿxэ=>5 >RyT߫+3|%G =MdL \+%{;Mt$4VԦs?80a%"nʱ0>->tEwԳd ^G6 >\עa:a[!EX0%!uyiM"Jw7Br!{7Ƥ 6Ք篧@`}>UPI¸uEy@q= n=UD-9¦<o>suZkmL;iɎi kuv&ڞT]S>}>6?<`T]-3d|=3xC~՚2~ޯ፿OFstz&.?I.$ ,ᖳf'af"K #uwuWwO\wH݂WY[rHYaUzJ4ÙX$YLSba8ڵ{Ըە83 PPF.KSF 5&{UN$ΟRL 0ȵH"O2Hlr73KBI&bM #zǏ:N_j{MB4&5`'# PuqZny'3=M4G.N׹>/`&xd'w!G7˸^iwastHs |.o%K7L>uZLҨst}N* hV ve6P^*$gTC*m).}E$_?~/s`_s z8ēߋCrHɎPG'o#鰵 0oϽ[ v q=ff|=GŖ>a3ճVJz甡HیOٗ#(1_Ze)!M65FDDi0lmRv&g-r1u>ؓpGe<.{:a9ԁ^}ԓ-G{=8 9±|WއhxqA(BH=8T )^#Sp)/YwQ(uĉw6u.zY b6gDt%!#I='LY[%D ƇGЪ#DsցmDu OAP,!C\ɈqzGn uRZ2'}p&Dңv:u>IS06{RsIZm"KX!ÀUjjK<lzH VMޤE~`! Sg}g0/̓Oco4XsȍgT +X7xv?m~b4.sӖ˲Zʬ҄! G {J5ZPCFFg雐,5 BTxCM 2E$"2V/׷W^ժ#?m/<zgYš:d,*f^-\yv!ny|VnJ lqGΗWʟxuVc$֐q`;_.p=?^7|۳7-J Hfe3Ӑ̳l,T7szâL "EbeB;AmVzR`DT6·EyyT|W-kU=OO22\QzA!eҳB^t1~ƶg$BEh/.2 ,jgZ0 ̋0;^pVMkE892;ymPdRxTeYjEC_QoCҬY If~dEi"PFZ.]u"Pè< QEeU)y'_QogOyVgOyS~qM Y)+ 7N]1 "$\Qq]q:D֠IhJ)Px C x\'*N6GO&,\%IF]/:*vrFY8J&6e1$ fD$P'5M"$2M"$2k7K7?n&jm[ !%k4 %";A]J7,QZr"pTOf p\^"}Tm#%`&y^F~wk7бS9N!ġ7[ GD@L|P?lGQi4`To's;qĐc tebgo`2NJc$>U>O_7߇pǧHX8.m@uHMBC(&!'-g_i@64%&VɈLI4dZR;LF[/BVI˜$7lȺK&Uב3z@ڬZĶwL$'wwb(OO;tvgxw6q U`윸7i_==#փ;Q$f:gr;əuY})Zh!t71/mWK8kWp8M<XpΦ!bthRgNN˦+1/ʊi&a)I{W'n5dkG˝ёNr:RFV6|ZF,mW?UQu\$O3ٜ4/ǵhYc稓RvS9<6Z*~H5XtT}D :YtaT G!U(DtwEF`~g9)'{N!06 kpzbz D}]]jmfk]b= /Ϯ?qJEg{ [\?E 5)C}DB"|IR2J`)zlJTB0%4DZ&=hRH\a1kj/1%Lu9\B# h@>0Iu@:`. 
aFD}W$g j7hTRb'A0`=3p1/z|Q@Rv&V`w1};e-1emՁĪRKrȦeb8>V|h7IFH(ی];&j~Ѯ\>ʖzhI][ bTt"r<\qu `\'"8BʍNJڠ5/~[(?]5=VXƘ/Y9.o o;`n T?z4tQӎ{qTzerʨW:NCY%=mUhVx $=BSyO#2K{[qWBka4L^cN~]4k$x ogid_=^B =k.aZmG2jJ4nDGm zoHx.Mҵw[NJPdaRrv2\)FFjv֣"*X[ ĈOjZȞ#W c))AX$)Bɨnjs4S,"r*#?ѫ=={.8=>qHrV@mC щ,l r9#1"Tň2 ̷ a}wM /h]k *F"Ȍ#6֥pW96\\̛Ω}a&&yo3cAzg'H9WRuV$.f`z@ |ī:u+1 q(8 Bf[@"f٪I(0yd1*Nx/eWΓM`,JlZ'olq1@vlhV̰E,J) dT9ׇ,JggJW KfeLS9#Y]-TrE,(,=>:H{E4Ae+NEC2Qe;djv6ĸJdPZklLH:*!(/fT`ITi+28Ɇ8i@tC[>>( !-gG| #hTx: (+8KQ܊^PH9W^\Fp-\!2޶ғ6<]\rn4]3?vn􂀔z)'w!t՘FM vͯŚӁj} @5A÷pĆ)Fg`pR?7T0#Gߤ D.:(f4<-ɬym,zCаW䵱|O<'ӷ|WXʘ* %miAQN*GK(/89PHs)ЄTl한EA)<@ty;֒+a'C<]O aW-Gsju97_ЭSNpڊނc6SviOY.fY.hG3e@nlu3eLo[vs{7-;}Lٵ#͔Qk0 Ã.~ uǙGZ ,`4q68ڬniQXeVC(6w98H2WKE^#&v񋃣R+.nXUTJ XF}#fYcGJ;5OP= DkS.Ǫ ȁs-)/|O<" lNPL6Jq'?hntŜj_`)Ј3G@qk͖bKe][oc7+_3y) S vwH2 Nj+ɝMߗdf[ Eů"Y:V\cI!WH`L1qcR$kN tPN\P3$IC 1睐g"P8õ0R2U ˲8rh58KֱNX_DVTgb(%F% #GAT".ݟ+U)#,Uȍ/5Gz>AU=AIӚUBb"U+k&B|:sw+S>2Ơ!)Ǔ6m&4]Pb!k`v]p;0Cfwjל X=ۜ{g}޹n9є{g:G9,[h(m:g]yt"HW [+Lkh蚅;8SF1aL[E=;[GlpZ[Tr2wɓeҒ"<}ӁM dQZiS6Nz.ѷhNJHb|qlbOkxmw]nӃOnj;R8T"1] +P`bue) A8\NB~q-|~wqݎ~\ AX nWY=j ko^Vn/c/~iU\MvAD&zU 8]~{3!}R6Iɤ׈[7pcDZc afAHn5!a PTzŸj2A5gmrkGJ\E#=~%^gO.V!*m!*fMԣE@`M~xe)k.g*%1g3T)pfd/p)@vdSSr(S\2KЄvXAXQNԕq䢵gzz(n xb >y=]~fݴJT+NRѸ4pq(wU؍Kp.[O {)4Oӻ/z~370)f+ MO>ͫɧ?L^NG&X jﺜr'ڸM^I5 dx~:$Ҍ (B"AbPk.|l'1D!Y+JU 7,,1 h/5cДsRibQ)6! rlZlJnHd3tSD*q @6۾]1 ">c}ƎKEeDc~~QF=}9n$F%)Pݥ(o$(Zh*EQƒZs&շײuGB+R"D~ƛv)I;9 ፧h*8T䯏!+נ!iFҜuJsVT2ba]2ἂRSKJҚ(CR:/#͡ a6dڸEr]B.!^f\P&4 Z)n !1„c#а(-mfjv܁{^H$Zk/j/WRJΤ\L-O*.if8)☓8D!)pCR.QZJ 1R 7'.[n'ѩo٠IK)>/j+xl*xq,((pP#/VmpozӜt\{k59 9J`ZYF( Ү" #e2Sͩ7)қPJAkFi) IksJ3F4!!%fXx*&TVLfG̓&4PcV#-ABA? tLXʨ5HBxWzSQM>aUHw컘hf[uLΝϿli7Gq}GBՒ;|T0ϟ'}*VeƜHŪooG?M>s vUF|xN1wb], ۣ)A+[T/mlW^ ف[)jt,'vD N=ԩJQsML1*AP*@i-5q7RE,x``l} RnYAi$%U)P T=S=:ŭlU&l9ea]Ml0W79W!l8v}|Ώލz.#6ln4\K +|e1֔ ݽ_~`SVg\\,{;9:>+6{^KO|)/K\3 IșhLRM29햋AQEwD <-kn[eO4U5!!g.Y2ڍ , [&I9F%۵D);(3,bڍAh\ RD'w*ڭ%Nmk쉦j&$E4CPyw{R,)SKb]4g˹ ,)-gW_`R|IuhZ_5̺K=BYWboɺcCuYǞ7L]s6譆'$jToEES XܠI+,`>fG4Rdk1T/EdW6D)P2OAYT*ZUUa@/J7h7CMZ?vc ;SQPIef+VMLͨD;S'SlH.%d`灔V<\A5jd+H@z.Kf$1xFN}pD@""Κ25^5Ǔ} Lǟȓq!?9 $KQ3hrla5k1PvoF.H-H0}f f:%02n_ᕼ%x/|hx˵jޤXm0Ū}GjF/?ދC~u&4EbgY*VMz1-8np8dPjs`Y)#U;/w9# O/w7=0&Allc@@4@n[%]82?VaFĝ5(O{9YeI1T$J*/ &9TByX B+ SdSR(gttesCV_qgq{̬02W Yȹ  Tj(S)9+%2m d/i^ɪȮǹ~^%YaAX*QT2%<$w2*pn@9f Ga(ctkxB9.xɮs;['"̓EA%%T\{(qI'{* 1Ҋ7[EL e] #*O.P==Z&j݀1i.0ȷQUTGa%8*DPH#‚|VQ%6Qce]|($bmcvBKO=T9h!!8] `50r1hpj=,~%:Z8I z})٩b fIv-^ͮ5¸YXωQ 靵 rLD6ϡ7m),eͪhaRqずq*o†Z00+vFic$Ȍ fIABGǿ>zs/S!ޤ,sO)*]>5( VekYuxfFwAEk >1_1 zѦ\/- k!ZleuR,Nj0`APt|ss4] ptARd)])`9I,`v;kT@MxѼ~ QpY_TwӖy9$=dEİcʟTpŁ3IJ.QUyHNyHD1R?y=a̺~\z] ͍jOZ+e6ۥףǏK\h۟'zZfp Y|w!q BVUw'yDgE[5!!g.Y2% [A-v7C=Tք֗ph\p~sil6kc3B }:vÿ"%r(wu{`@Ӂ1o(6wά͸}0i笤y-XaQq6}uN_gr][Mt0~%o庖uVІiåUU8X>z z]d$S=E!U S"Tdn;|fdk {S0sRzFCHHBIE-BVB)ci`ϽZrsNsB@bHm&F3CCXj 7\cf} @  G+XϯG+XLƌ B ìj/0.9ב;QFoGQ| m% L ~w,= X o+t;̭Bv:>z95buΎM8 (`&Ĩ*ٛ*̀`>WĴldՓ -iwM=d!L[,T#Il83wy#α_wJ9]ޝiͧgd_'>8X7qy5*q?$UWߋ*rinf\$swǟ_۱TѤ0MR_?-8J|88o󦊓xTmJJyPX_:ͧ"CBgK{P*dz^U!ԐT= {Wߞ}+ϵlӥF|kD{v 8j D<;D!$Q8ũg`I瓝A[ζ&kC2'V&|A{(҃zI$Wo6vj* Dg!d~ ڦXny zXs*ں‒gTNh\ BC]p[Dp^Xh U*dN3n:(6ي1RskɄsId3QsBRcRC()8Q#! 
z'5ES,!rbQ ޽r_ rjBr)J?{y7 pbb:ϨN":7 nmXWn16ma^m ۥ*"x6GgAdﺉ0f.O?Fj״Sw*W O`ğj7w$}5>ѻ|"],&_'UK'6SBW(C6Q4// )"uѣ3.㢞 po, JO1Q%>ŬϾ6(gxVFCD{rR_X)S8_#EJk9pNpj15z s} ٣k)J?c1?d.7y$*9.t I):p񇎑!ĺs)@E"/SphB6 cⳋ+.Wi-7X9 ^7aFgʈN -FDj†g6 +i:QM'?a+dJhaިA{ _bF$>|Sb0oka_8*mAz̥cY wư Z~s[@ _;75Z;mSj.Pb*;:iwykvm'y-@>ćT\mcګʮXXݬS {75l0ݖ?;״pHkfr:.׏t+\r]/c± %Wi$ LU 69L 8EZ茘Jc !Fr FLdP`E|cvl(%Z"SZ,KS +cEd:H5I ӌ`%$Qd"g7.^Y->동\գq;+|?t5)eX | 뻹)?yb2k"%A8W8@/?XaXv%3Bg.߯iBF{{J6; \:ۣ5 3Eb>XuSH ."DbCT NyRq\= q9{OKrhL1VJ / 6+@$e5ϽX3X؇dG3\sf>CqtXfB8v[ |$]ߋy 29DlձoUkDvB>CkrhՇCDc3F{1`̩?cw8.x jZ]K$8nwVV QSsDq)qT<ʁi㛯d~IFOa[9pGKZZ;Ra}cAJZ T&3Ƥ3 ' $%&#hdJn,5%Hb9Q>g.VF[&f:POEƻMtoRfNRXcw(\ ɿ97$XjGh{R+/dϱ&>V a53~Hن|&eS 'y7Lb1bG|FF'g1CyN-XWnmq3igEIk.aKXmU;qhXwⒶufvf\rK|7PW2 UpnӍħMK\1jc4T@TQI-[[Mv:r [э/яYK@[YC%HS*+tDH&*QbsФ//fflH=& ܏X NG~yo.@Q%=B8f>"**ɄU3R)ud'?HS)M9a9TH,È+0)J3+br.(R A | D\޵pAGܫ X}W4p_eoG\`Pod!+pbɁg<Đ{x~2}:ZMʕAbX!>= ҀY]A0uPؤ. 8ub%F[(@O`u}NJTzo mlgV !}:*$TRljH&2^W5끣 ꆛӺDA!zbU{‚x> >7.s>7.s}@ y(K`\Qٽ5Lh5!"rq4/dpEu(}}1TH{ 2lB$U\q)ThL(56jo ,L轀J=tSTj M=Zo SH,:?v G&VTh7ߚFj`کjI4ʀ6˺~W.<:ʆjb7t8GTen'2ٹreu\5\]IVt$k J %/oM:]:|NW$شY<.\JtHu oڿ _/Aj-!q(δ]c;oR4 ,3M8/"woٺj U_1EuW4O No;qƹߝZp5%b yR{r +& LA#@3T9*#u.9PhNڀ87r3dMGʍ3jy&M@YKCsʘȬ?!d%\ A,vD F|·EƝ'7˹iwg~OmQzԪD?>}:AF!6'{»`x77G}ݬ[btj]'BW;t~]a+쉔J(|z/XM?tcM$`.0`LS렭gA R\ q@"+`"s>/QXt>¶0bIBpwA!<LX9Grk 1XuSH1h0tUl r6MVfVn]1dcpFbݳMji]'NP Vԭ}r1w Z||0v'vOƕLԒ%*#:)$e܍ppbbv] y%L|fyY)VU=6no;32.f%7#CF{22mY}8>uZiUjʛі-ygTsІ;Wa+gؼkî s{ q/+,Xs{SYS[vPdĴ,"4VBG1X )#LvobJNf2}νj][y s".[4 h5덟K:r"AR^m cIN(+ i$|ǷLLZ KB2QD (`9h=CcE"d{1W>bDʜCWsևbuu`c\hZgʅۧ),%pXf㤠F*b$ŗV #ݯFnjIv倾9N]ޠ.Jg;%O:ۈ"grQ5)Ovbѧ#)尼6cwB*=Ƶj+8TӸօ9 onz$!H#&w@1*.^{Y H)N1TqIr>)NQ!'ajH̲x@{Mv] HmR C(z$# NUѨ;3!-BLӔ`,gDghU"BSn)pM9͜Y]QA)S Ȝ$ !Sk!ayY_u|K3j7W澮1- 8 jRK c$)~^̍P@24I vNKD`kv7[HNkwBTDdHrf?$BB$_'7?@= G5 IV%WDܾczmEuˎ&7@jr9ΈD&Z<=ZLaϞtVMCurHQ$Aj~7XڿC4em_|(oƩ~xXp18z\TL6I{"q8GbIZtm {e<."SB&2CwZeefZʚ&;S_^BՒ9e\Ku}!SZ)Tp"b"@hZv5:nbUg ,vr[i[ 0Mlf'D|dfSFI"0A$G%m$X<)(hIup<T LL6P age 7Y?p#8⸫g!(nʬ˖rmAXY<*TI)ckxNr`':~2WW]w4)woް[}$6Va4hnR&-34`M0Vnr  hC F%IE?kmU΅neF\p7&D<`[HNx]'n&uYKUOOˢQ 4m*^QjI"W$ gf,]< Vڔ싍I) @7Kt 4_ʭᖜpøֹ + inSk9 LE*xZpu7zYQ\n̨g5úvN@HX5S03a"'ڙvNJĻ82"2ʰ9B&njfaMŗzY0a&Rs,IM .*R weSJ?{/ol0{;µlRB`$e@`%tӷu!wU}Ǿ|Q+?&RR[owӄK< pX% ]#R />kO5:+ͧUi(oR=ມBe:׺qiۨUp&O]nNF P[3ϣS:iS)38}}np;7-bM+S;PAD?4!՘zRmA@̹ƴF>]\tU?IA1@ϑHJAf QSSgvsIݨN ڜiBac/tsvZķn*AO6eR)@j1u;ceS2ʌ={}s/譈LSX{/1Bs"Y+IA4>ޅ̹T {Au /u#oTє;p%2".ZR`AeFyE{22t|jFiVTd^p7eF@ ƚYTT[Rmf3)!nˌP!ȭU,Q}^lƧᤈe^3rT5XMJ9RHy\dQ{I@c"NKTb)%ɭF`r0 2E%V K9F*ǀ+)Ӧ,ox),OsV Y3#2ezOsedyJLAh!Cu:i϶[ɖ~ Hi Ꝥ`bqs ʘR>GXy,Z (U).2ϝ΃om=;ԙ6^\>7A"ceMQ[I IֈH'x XN׭N`6}e䝠q?[d6!e⣠r˔s2dje & jÌ9!*]Pd ?DRqn[ 6x}HwuVK;9è <$!0n$^s?A{} i&hedatI>ZQrʽ]Q攰hXح>u& ۡNr/QS)Kx4&u6P>Iys#Zf`V>:2yzGMܩ rC)U/=\n0Y!Xͅ:v@ufhV5LNC?30J\JKT=陘v1OZv˵H%˜@?L'(yvY[vϙi/<{3eVƈHw~w_%?'yͬXܻ'˳YWHK_ ݣdmmio~k-v:u| a ؓ@ȩͱPbx5:Y73TH' { mySTtLߝ:: l%p[e{-V~6:S_+ӵ܀1NIXCr=) mMH>aNH{(>%z`9Ziv ~I˯u<">HルOsx ? _uvcmq;?ww~TtW?BljowI _rlt=W{q=.,z8zO0<>gt״ 8>L~; ~#kuZZya|%_M>y^<9e|aϫ fdnrg38YVo m~̜#&>ݽ8rܰ,#́5{_[L۳ cQX6a ;Lei|Vܥk:n1x͜>on>K۽q>s:{PC"*pv biO?ziLᑵ`}; }X_o_ [v>^;-|ݰ" (y^4FW9KuWpr|; X%{EɔvvjKqE5Pd !RY|ŧ~o|_WW/n??zB{q}_|dbVfi8^|<nŽ'V2 mPR}d-gJ^X=-p;lk}^]xUzmp3[74Q@zx[w?Ik;ȥtlByN䕗^&!.* b;sUxT?v{.LJeI=1^jYQ/K3g\Ճj5zfQ2 ;Y!Φ7\#ºzfoĕZ:{|f۷owbZ햆%asAqAEٺuΒq'\YLĎRŎT. Z+}Vج8&mBHOdAU@%ue%.T.)Ttڴt( .ܾYBrԢ)*LFcdNISgP 9V@-5NNDkF--$1q؞ehT UGP|RVAQgy9j1,l $gY;#F!G BX'F9XS ډx^Pg%:E!:Q|$QX|:V`5_@"M7vO|ݐCN8FfnR/F|ҳJ|-Cv$4,mA@;6Vz~˂kK9lytSym3/ a!dyQ3e~U,:},Mm;Z~/9y}Q\C {iIqĒٶwfI V[~7qkhYu^:M!H3EV(΄5e~QըL~eKnĂ)K]d:{Ҋ࣌%U D<.T\Q[G:8v7v#^ - 6e? 
by67ltD [q߬JS]R٢^6ژG~4Wo߼0L^?)4^&2oĀ}WT28Vn6n\ˡ=e[BIӭz_;d jyƱ,xyǙA#C;qh/ebU38VFzhVWVUHNR%:CE@Dr¤vAvRKwVݬaM)YK+iLM'#* < aʦ)ˆ&eeY0ڤ Ȭ:]tb L>ݠyIJ`PV%`[d4XͼW1(ND; ڃniY"b*}`Kqɶŗ]z>.hR6SKqh4ho&ְR@ys g"+%?ǻ>|C11^FXp}gpE)IHr+XO)և=.yvbUB &k糴E:YP}B 8Db !kչ&6*:AYn u9N~MbcM_)uL^KV&1Y%V,Rij*$\L6*B5GbKQK.koEF95\_ظa;P̤\QQ_oll.y#:Bt,|+dHdIptyFbι.F\&/ls5DRѩj :~XQkb$^p2~5dɖa');1fc!c5A%o 3DZFEβ%45TdUBEuV$KuJSŹy,Je'2QkII޲6#J 6v+Xd$,ֳZ¢) RyhlHqN]]6ܺUZnnq" OYficws ';x48==tny雱X|OFpIWeb$ǟ~? kx6|dwѡ|d'|dLMg6IlZ0OƊm[^}9=Vq(lXg\#=3I èg> A9qwN7af-:0tz%$е+&A$2az --rcR01Rמ8q]rf4n:H ;59{!6G_sGEM%'ovw.XL *9͋22)[컃M&AdYM|lxJ^ؘ4#ΪE6J˅}$ e",6[$ttw1DMAT(G-iwjbU[c1"'u1[@}R1"M;'I`Y;"DQR%ZUqힺM ĦC." 7ust)%y’%ꆂ`o1(G6\އV uӔm]tADfYILa˫ -b!g|4aT7B5?z@X9ڍpIv?>}q$4=C_3v nϮxw:e6ј{5-+AwВGE3yx5k:|2ҦK$4>}dّl%Zǔle#{ׅmz¸.strGqz)Jr]vy (+4lËBq$a&oL@> {!v )R\HgMttwk|ѵ+XM>_])nfWKo|? jx}77f2蛇ǻURUM=f\̷__vnnQ<^m/?|Es#_/2I?¼厽IB{=o{xػ6%lxIH^|=rBHr9ۥO!)ZaP߯gW!]IIa,iuOo]vߙ0lwn;;@C]v^xzb[zoηǣz]0~?Z*RGjٷ{oؕk<'je ڵ jGaspֿb4ճ)̻Z 7}wlx۝4PBˡ5zu0oA"ӳDQ<$BNId,SY}Л%){]g.Ί`+e"]u%mZIϾg6w!m0fl<BN}r&CirQuq0ta*CPC\>jl?x No<{g(ۜNJi4i:?\,hxnY/Fl%]>ղwk>_f_\ץ=;_4컖Z;unއ_gZo!'ZikB0A:Zyk:Κ}cevoh? `7Qf]ָ7՝8~dF$S}̕y7waϜMuڨZX?OW3V"mBqe(hpS*}}m2}.X t>fm F62Y|3z[^y0$Yw:.NtV?tEV hU/a^[_V.n[no0TܵO/p~y :L.|9tT`taߦB:pO߅m+gU8c4ws0J2KYwT(.w{"10ןL4qzy"SX^nx1Ѽ ҶVygze::Gy[FҪmw6*´?7Ⱥq`mt֟Ęo#OD.ޏ[{pmȤ.Oat=R*}x r98ݜv*a\셋fXVvA־t}Z`ԭW/~x `}0{هJjtgGէ!҅Rq^iNӦ X[W.n48uݾo i)؝ug8F>na2Zр]O+||hRՔ*(MV5ity\-E܌gY(_0$gk,6HUm̐HA>ؙF1*LɹU׍  (grʎ%z8*U9 7_~}O_o,|TZ9x.g/شeb`_/%5==dZ!rkj]qhW̹^AJBxa5U귡HjT+.Pu'W4+oT֩o~q}i0t#**$;o @p+Pؘ(R=A^/zB aY+e "ƢT"xe(86".9X 9 +Xڃ~{&s $OsX>#V暮IN1Xe#+rr͌1e[,27{ZfU`YRRn Re1BPƞH3Dcj$Ep z[,0^RI(HX1qyyn4xlM|? 1{o$ y.%%%PV2Z0[pC2^MJcstc'_I%cn0s.p)'`ThCC, TA 2jqL;p'e@4Un*]%_2gE`7>哓l>w}S-ZJ00cN $@MB~MtT yJT 9儫3Ixa9aÐ2Q- DZLa¹H#(_\`ehS{y,DddFQQ\ &82lԪP~MQi".Nൾg(H?AaLw wwZזwBN?,n%LڞvuI5ff֝^R֝˺L֝^[֝#RV".wdFpBj؟LZf/_iῘ|[4̇5VJЬ%Ik OUSGC4V}ccwujtt;tٕulћvM#5nE9H\uN=== |v#~fR.ce:qdȭ8GL6?>%`4RlL1El<[.Fh6!8%h?j]O3bro:r` MZ`\z*@Рs<,&H\QS1a RXX$2`A2I:-P8[ e.Q6+]K4~W]Ⱦ:kuk'btxOno_.K&T+$7|Q/6vmǮ0=>˷6F\n H,~?4ѷdj!%k0? 3*HF }5G)`_#4 EӹT0'PT`$afTc<XK"No3:8߄ -Q&C6ќҳ@2I(Y-3,d!R0 &Ŝp)kZy&m̪Pc8x`h%-Lr̗'-jo{1.yi̶AREk]6ʜ9sMQ `7p nG8RF*D LbLZ C`yH˒K.al"s/>%29@d$VT.G( ":և $r Rk" HaFpJTDȑhWp `gAz+%- SSkΝqu;S?pò1I^nօt+LF ژwHG -48ɱjd1gZzrXByC pL؆9:F%xt p)Bi7cjNJ[FH #))yvc כdoOwu#Ss^II3W ee[?H_vqiT%SٷdSiV,Q(ߒ RHIupHVauy^S.Oim>E\l\5s4OG݅sPziPL\4P03(!fpDfRF3.+%TҪώ[kc lyR@]8 P05M,WWܡ01.AZ&D Nj $~g\m4kόf_ǡ%0DW(JTO[Z$j="`pYas@GލaxV 3zcg0Ę47q]\WtOb,G)]Y(ϊaFo_ǡbedXHr+NDY\!WhPGd`Ibg0ymȃ0$VUd2*&ΜIU9Hf+Ec1|^ 3zc ){@k4C +A+d'ؑYdH US6Q(85D[رpk,ᝄs(ÝL2,_*dHYXiK "DX{ @q0ldFqAeKn $W/ :ikNp#T1dpABLPp!Oƪf[AO!: |1[HhP-'PbR&.j邍Ŝs7ARbD@$GE&\O^Y@Q"VrCT 0pi$H$ѺfT_{ZZM`Dou6n,tkF4*RxHhF҅$RH>()/]8,\ puĐ, 2r_B6>@HRLDO/sRAqV3?jaiP27+q rIu.KtYG|a k4ڔHCD04@l uxb_tI4Z 8n%%Xk]e}9!*}M^fY߾/ tIBe8 QHƋE6)AV<_h3ʊ͗okz䓅ݡuUZ(]MPSo-DSRS($K+=8x< QG3Gڸ4v78"M]^ y!"k `9ɑ,F< Dŋ) "4Z9ϣs.ɶ e)Y4T9Ӟh@[AghUXV-$E-"_V8 *iR 2gx&Z SгFi#YP5 zXF8ߵ[W5hMOsMmRSzt\{*L$E^GG: r7;d860p[ved~go7Ȣ98rX Z8C,\B2gITd;ZE_QL[:9/ k~"=,䕒|=c^ɯ92!|w_Zۡ絯^}id;ˍoTR+iO0Mv4A2ed )li&lkKOO^S."X)KkDa.{#C"[gbjN?H_]1FL5{'em@$0 HEXfՒ^ksFxsoZ2ZHٱݖ&{zE>Sq/{㑓l.i=y|Ag@+ \tfȺ]MWwyc}nʮ.)F,6'?]! ȋ)A^;0]ndZOz SzȏL,z>G=]f?f{3V>|<]vc qԐ:Nbvߍc|1 hht›A%rѯr6 fw,~b߱8߱8܊A I؃u-  47#A{ Z'y~5NqPo"qhj{#?#6 *}7NO7ÛWt5z28-D+:΋}DAm =zV@r%D`+j%V|M5>~^.f+XgKm]+rI! ba^jd[j+F~7$~*ʢe7C;[ H™4,p, ;oћxG@J0i*ѶBg);oJKޛ4LE +!ʌkOܴzNCg%)5 3IνC& YgɘAȠ2Cۙx,O.ZѐʒYJXsM~|zO;YrKfœA| ='ڛ}ambz<# [ov} }wn$Hj~$IH,HM¤Tj`p6|2! 
3xFl&4ء9Z L0E%CGx[?k(*Eaʜ.UiKdxo&-*5AbZr@k: |j'\EŤ bLjktJD#|sMLp{v{Wb͠ o~3WWզWI_m>y:Mh{J0"&0#Xc BN})"p:A(wѽ\J`-9сX&^b)(JJ<&zn@[ҐZIղD!قLY Mp65s 3S sDhGcJ&&)AqCe Gp4F ))<`8˸i33x¡)#a8#VJs#z ޾ުh7ѯc)^&ÜmdrA:Pr}4,{.r [-dBn]"^v/&f$^'.;Smg+mG=5R xʧߑwo??d7AX^$0))D`U%jćÙ ۢ=zuuyͿ+FXKʵ g-^tE-41 ]S@0$bl 6-} q5Fb!Xh!l[kE;jpJm F7{rLpCWRx|a۠]aιRHh`gs-?]o31f=- Ys]i.o #*Սz\=v?4_-Ϲ\{kù#|SJ7^Bg-whJxɡZSluׯ^|[ ꂶ*@3I֢4Q>T.-z[.Tkտ1*`Io{0W g,K4S)?F/epB{( )Ǩ0aWuV~Æ.˅@&& l0Kt`4| hno2 *!9˾Ʉ4E%:I ;v'l@R >Mt&]d\r>%2;KH@8Y):%E OW ȞgGKBWHeA3 A{#/qS3keG1*',^ /YAOXpˠ!.%di%+B"ok"ZAﰞT-T)6ajť҃X5yAL׀PS ՙ\HO#GW5?dv;ç[Oq rڕS}1W/+{x_og!-VJeO/~0GHWApz)iI1$0}GEWvәL6[%l(ƌqi/ykoz1Ea y.7JL2홗^(*!yFlO 6ȣ#Wz'F' M4U\$TiKV؁<'es4 8=w[c(f]z0x~9_̾Vl'儹/gۯGc<ՄIzV/Gt5_{1 *\EK/_ 8\I=n:wB9iϡ,WD#6(99.S}6nVQ#K-"pbjB11R*Whp?23{Lh ;("e7ߎߋDZ" BR/pd FQ @'j ?kB\` IĜ/{J w):rWg!& \y60AzBbLjBxM}\kn_+4"PQu KX* KPcP-Бv%/_oq: +GӕuiFS83ڋ=ОgA. әE>7o7UxE1To$V[ʰG{e]*菫2Jtc+'_Q#x>DD9C6I  fxAz7f[.D _bZ ')z" DE2{4Rsi`Gؼzl6kkI]_z>d,.ng%oME"߅ _~L? Za $V+G׊ ERL΁8/DPj%.3Ž]֊ 1L~#F u( 7L#] "D R"ZqJ+k"z 6Նq1I(Ҁdl0DFx$L&#1@yKئu(Œ##AwF|nE1 [a 7&l(j^%N-$hHĸ:cm5@=!lH`uº-YmQTmu[xEPQtuj |Zk8ZN0ǂ1d t@pR.F!2q@mɢKsuh'dR*Pb$I;l] kנ6D5$#ڥbچuͻK*llv㗷гZ?E!=짷7wAov?,Ux}_UaG'c,CA0 <_J=}>d)=7UuԞ$;N2%[uT7>vJ6S8vj>$;уeJq.>L%0J Cej1bLzQ7{d2Nf4QM"(8_8;j-zdE`Pt9Qlp &LTHFJ&q8- c,4N ޺q4##Q%GZ@kP6zLV:tBea_ `Z2|`)QrE@kЎ K$n(@-5p(G`tY.V؎cR($TAs c+t#I >.Yaǘ+IceY/Ya-Dd=zfY]^VOlc_S aC,!a̴2J2.F\:؈GK'&r)!B 8_INi{=u+פ6L~n, zʥR`wC*Ϩ:6=.;Jt?e9@gEc:B<}mҟK1=\ųQ!iSzT.I&p8{u "?vBB svg]6Ԅ g;* oF+!on#׬J:JD7Ҫ\L+Tm\-yY6V V*-E`%5h=>Gw1~ EvJM؟ˤG)y7T䤵Ń1T~*ו_3sq٤/Eo5yR_'\|zpӁ(`m2xW p`sܧ|Һ>5Mf@p 3Bڜ|6h&@N:ʕ'g=eﮭfKnGO49][voZQrrq `gZ)S>cmAtծ.\$}WkH5L[]Xmn_Τ !IRI@/!x2! FZ6F@FȠ;c 9&NnnՒsHf~|F1XLd8Cr0FgZ6tM ;poisAy;44crtKN䘱3 :+_ c"+EXKyZa.˖XeE V(k4U;UlKrvP (<):ݎc!SooLo1K&%),yb}gދ{zp̼O-8 Dsݰ?aj9Ybb DɹDXw5{1EjMgZxH_Q&~XCbݬP?o fH[=ca ɲ&X7?v\3Y]˝V} ;?f6SA6ZPF7ֶr$T0GOIN\6%'\TtC[]Op 9 `d4ʤpY7>m$h$Ou5U"*7R $5o<%Ѯђ&+lqSċ؏20`A(~!CqNW@c5PL)5NF15јh5lDjHk ZyJkPM;15bo|cS dhSO8\9 ~_25J7Wxҏb|>@婑`i_'P!RRac%&c܇tSOI*降DNetY(4 3Trt܃Γ{yr:/ *pfc4(BO(ؠ`FS=1ØjL [,*2*&jN3iZgY.Z4O 2CLK|w6 o x6 ʜ^+cpk>0L<5R,0X )yT.S*D3Yk8 .\;vLc|.0J/+dh׊(3\ls  iP""QyiRLD0=2ne8̰GVk;&k zg06^aD{FΣV`1(%0I ZNrbQ)gZ*7ϪE7i-N$k]d;e:ԁX1 @6 .`U6uia #;3YMjVR~ux^~+13QչruP٭K0ȇk׾﮻0+f ^j*x5KfOVVNVj870:BKos2YC4hqD4(F%oe%.ה0TH22\hh^B :í1l4sdžɶH߾}߷:ﻵߪŏݣ 6?~:Xzq~z~~|x]?{U{zx7^OY&Zn]wAF; 9}^Ng TiZie@mpJ3H:_.+VK|unHoVdokf}@z:dq3~uvnXRmwڽkjgYWSm:7JۙAKh{_,-=lF^҆ -D(c (Rr+)VSJa{oO'g7o^O'Cwn\wk_'~Kyy>;޸~rsTC]:=;~<~t[#Yyi:שxjʟ}2ӿǾg| s_oE/e7nң|.۽K 7ߙD2o\kimgw-3!@] uO˴6T>AyLr`tCKhnCv7i:]+: Tk(SIx>=>KiisLvkM!Z}p?j&n웫6=+^Lkw-@yqZi_uo޴/Oވĝt%Ԕ931ZϣOԻδrnM:UVpeɮ~m^n-@k_\ApvjxiQ?n?>;٨&+_;E^0^`ﻍt#75Wm~VY7,(lwF |xfS-f+~YlA[~ۛb53mәIQh gZjٮt?6359hV&melr6$mdqJ9 r"hS gTF~#$ڎrgs{"ԴG {\RxGh%߳=f';%ƚ{a˘8;clgcīɂYF6+9~4` F3Mɢ}޻zQurNn^u)zޗ%k]>6`F& ZŜå90wCoMeEc!]c0[m0wyz.[ b{Wf8<'7BLe< 8#Z#$rWJ+,2DV&Y2 !;+x,4ܔ0|4FDoG%!,w@!P HEcwDp!DΩM^-%"׶S} (pkRQ>u{Y-J$q^e>?>Mrhrpr?h%+zZ48g0sѝi I&>[$_~'ޢE r1.W7a}oIAUr?L(Oǁzg@ Ex@qC~w o~c%wՍq/L0lC`\Rk&VZH!2& ES0&fG)G\{̝[&mG,b?ƭ3;J>up KU^ 7"hL=0AJ9XS,w8HSO$N G%sn@hS?iliQ"4T;EGB '*t0AL#FF)茏R ZRzZ'+'Lp@È4q cmuNZFae:ez^٤^)ݙ.J)P F["ak%% 3}} E7vh׳Вh- (NA0p,tsA]> tp:V,n)yΠ /=( ! NPT#0U@;i'K)/@}:s}|-3"X LJe 4ietjKM a:p=0> ,($R`dY0ڔc-) `҅XD6a ƥ&tޙ1ڃ驥Zl \suLQWG!VJp u447 5NVʈ()hUo)@`!P"5k<Қ&hxM$&T14 7c ct(FZ{@bB]Eb{匋{ORkug{K.9ߚL/l)ܧW]6]mk}6'Γ3nʄ)cS&Xc3NW~r?/~apMi!v߇MnY)*՝2)U0i ]Tno }yze"m*m4Vc%Ufb%kO{ǹ W4_zsLkJj50nm*J]UKd#2(J'~7́٤ݰjzw &bR[bdib=Mv n*M! 58j\Z/u: pn% Ԛuoڄh&dJBH$?R ӖQ))! 
&1GIyt"q EjZX6)&SlKg㌌2(t42[a,sQ8?`NFI10 1;HxDtz*PL< RtXW`feN\ENL)lQE0J"r B8`kD b2B\V\e`yr?gLf((6X`N8L TF )LA9<.yD8TGw7?HlƜӣư] =-*kknHŗ9 _TR']v}`f q"߷$A JRŢzzzY`L(W{õF 0 9ok'9_xwhCFl[P_+,9p6f27MʮXg'oy$"ug{v6Xa^epv8栚txQfi'?uA ʉ` q)b|B6X=lvEŠ&AU( )! QĠIǢDGDQ UR :EQJސS- x] ᢨ`|j3ӖlaRI|{ ۩.,1wuu)(`,Q@GqD]f?E*ˡ ggJD(]e²A)>2VjFAJw$*xZZ$#f{ PdrLT 95h"} lU`1Ty*zj$rҬ˩^F7u29]Ld8f/6:"Z\%'X߮Dbz6Xr8 z*Ҳ ̓DiΑ(oOp6%-oE >@T D%t(8 T`dU[Lx7 q-KLo٠SMx6c4ord+6T '!u2f^<##%q!MhTpLP6ԤQk;ʪ+r8#xǜ7y2ZNYɺ"+> hV0G7`/kٰ+ut}+7:7߁c <4P "W(g%\Y1%3.6`:&f44E",D)^w81CiJ߿C 6-7EDn.o4Z lݛZ2j')p7h1¥T$piIAơ.B60"Njoga`pҬvLHϪ&C\k2KXo`C0YphvDywF JcE[FA!%`N)a 2E㞝h+.C@B)͇dMԡIRt]Ӽ[[F*xh^K?nx-6ڽ+;)HQsl}J]8vaئw5iXn$ZGw .HujPBYSNpaibX2 z#J\JP(\>ɖ JQI坜qِ־3'(sүA$fZm'%*ljxO>G)GPNKSS>O! zr3q N.7s3UjVpʋ{4Hk(8|(-)&^Iuu˯a Bp8azB[V$y\xa`<S2:_49Zd%&pR-ufh9vqERp[gcpIÌ/y 铯sn}//섚n6ɔ/#?]AcueBҖg?GU$cj D/U-0_!-vCJ q s ɦ_]IS0>oA3ȟSE%…Nj̀N m'ijAbCFM9{ y*-T*4Nh]i!QIV1sK@73&](ی@twY'B ,EAm(( XP:{Xz:TF:Fq+pU‰D38 z${O~%+;[( z'ct˻0 Н`i 3͞zl鹣~jLjJz,9"mkH?raRfj$rG7QY+BVr4Vbܿ!^2N Q"!U_!>wt ܄vq:S:n>)y]k͞vr83AUs_;SrYǭ3ܤX'ł[3X|bLrJ#wfV"gRd,f/'',!7'vL>%~YQC-g_(C)k+EOɋgf&A;ro/p=Cϯ_b*%ly\/w 7L!^ ~=xat!x5?} o:Ǔ7~7Ug?BO;}30U3v62G"vpw UwlPx\)z)Z1TK:ZByd& 6yAy]4Yf#K`J::27VEQ]@Lxc{!ުYNL{lޢ6A]E?N,|Ye̳+gs \=ve89aBap7OdMH-xkA/)d{^:u&K^o^ӟ Sfǽx+GO\ze;?-rК\D)L$iSVH kY$&&MT'Tg5#4UHz!=VJ1rñՁ6R=rKNm3K9)RDYcclR`?jWشQj]*9 ?R41,8#,1Q Q'F4)SEJDkc kXvDHڸDi!F)qfJT."xAco=ĩRP%IB()h/GbX(>NO.1>ebP@֙ yj1_ _ xt"W^YELJ@Vx]̟&92&XB(${ ~ 1T0A@ŃEX??` sN;7 I DdUpfc glzP&ŗ Ž7}0gԎ;2{;vBz6 ^kfza'C%ZVbʛlz O9Ug@q}>cY&)u9 Xĉ:N (BZS:ط~2Os 89kケYJ8;0q @9i \?7XuY5I)j`PD⛹h@B<_jNĒ6SVK%RJѰ8ga(PD΃?%JuKqC=NM*I fAB8P*Pl1S A*l k y<2®pt,((<6"E1Ɗ@qx WaqEc^5j/ƑEKn YdI}^/$uч#ɻq߯83z$thkWb꩔C@R.Ժ*)h]f#>UjtY|zk v, VGV*^.BEn p>\a!BvZ1mJ5wb4эm&6j S*3~ [>5UxqˎA@ڑO,c`&i]޲Fcoʖ̤]N)2z`D3jf,-X5frݔ4S4D69'ҍ4ST(rlNOK %hYjA6z--pj$T4gA)ZLcφSs!3p7,JB8ժЙJ3|x,{hJldE 2ٶ<؜b"{~9gh(Uys:a~ d-t: k7^(PGn<2zˋBvd4*7/ʹSM:ڭ`wUƆ$`գ)% NZ HݿRgԢrI׋r)hù 3yHj)OZ~>71ˆ찴rZ>)a߳Qϋ˼Zݑ-;5fRv{V ;TLf73(e?m7ߝqc<^ȟr. C.x^+,H3>QXF[n'yihTh]R~oKv=A|ݹKW!R部Ksu@@@[ʇE$J2"hӏAKD2 r ~5Flݍ`אּjӺRGeʩV`,s{HY&2r12Rɤr;S&4!HSR5Ť+xI !d< 9( RFJ&1:[J*V KʮUJrJ8,/ܙn 9(YM;8 *Ԓm6B+kL!HMk>JXW̴[fEmlB8fnZT cj^=jBa`6m EN5 B:SSӮi:d[:l',8ltdX*,w/VBc]%ܑ&VExIuB3ߦ|>; =CuS\8k5opJSuo~E]t׋J+hd*54{;S85$/Mָ_ER%C,z.8^ٛ}h&',Ȟ3n0O|h?ۆffχPvH_|MlTZoy~z[>/^R,-{K{y2.>JSޯ$r.aJ__7{?Wqa3yq#:sFHP`ƒ6َb,mƟ + 7l-3~e9C}ad5Ӛ0̰ēJPF#(O@[i``TO1 ڸqZV;048m d54g  Vw ڦW,Ǣiٽg?CO+dIM:IgXTc^6(RVp6;񟓐SfNo΄ QE΋D:oKb7P:)R6W+^ F);9)c| }hLN.$PYiwSygXz{6/+/}۳pҽeytC,L7.Yl vFTuZueXktВJRi&hl 3F%+,%aMQ@ƪLJ2iT UXŔZ ֻ(utnV*HEh/CR򒟪r2$cA\C*uZHuTh & ٛo)TD Az2c $ҕh.> ɟFzw=9VZWт{!#af1QmJ?P&"[d$mMXk -)7 Z=jSeotOBQ$ܖrF'%,.x[ja Z8DjJW{[ qY2 d{$Rc$-ۉk멩3NCnK-`E3F*e~&BfRްhDu6N;vF4a̚!#8/wVguR۽CH)[D#T&F&.i*35k'}ݩvfY9jϼN4BeBvGveGHR#DYCR>7ue< t4[-̝q9Y4*IVe3.{Sh=?PDź~HSf*hpQ&rT%A5Ґpk#kVߘZZ*zu{pO<#/7RYiSnO ڡϧN8 H[A}%GnDŗFQ?Ƽn%p@FdJz.px^2ZM,m;aUհjG+%g,× mǚgn׏_f&0L)OL9ݡD]@2fk#N'3JPkQ,*p8Vɳh>%?/y ہ}Dn\v.2GX2)$-m66gCj%";U++CO0AW]u^tyUm]Ֆ訑1f-14QI I SVxR#qXm H%}^/\EBk鵚ŀu.WK՘۪iR˄ D)ML±P\Fu-Y:I+Nƾ{" sZM⼪V5b-GIyRbmX SJD,Lh䜝M=QljZ_;5敦Q'JmMU8R\9-/wdLX?.#$)ij90F;X>p{ӦE-ZήUu2GM"e: ibNhV,~ ȟ>ߧMk|\\iU(}WI@.Ûߞ/3 y Gb g+7zLs}DeP"~,/c4/8 R&UF~EL?c + n<>gt$J7ZMKd7=ƾgʿ{MVoJu(z/M>2keY?h|giv,b xyB[\& Z_9eA´[0Uh,ƍI@t1Ĩ,FER $M5(3INlu<&IZNFsU*cI5%-Ъ*UĆ֬HʊuS=&D=Zc2gGi|sisNYhqs0y]/.ҫ`ܔ 1܇ o7FN+Ön˳R(b(Lh abIىKZTfS}r@Xfg;uKwh1]DZUlb =䷴ai٣B=s_vI^7iX&~<(?  ꓌?='/g#ܓKV}N |br+vm0 Z )ʆR`I6g:N)'5E5I4.W4`ЍEQ caD=gnI-/cVLHw|,'dR"+C^ q&%e$AH37順%XDžݛ߿)ſ6wwofW>Ϳe Ƙ..oXSNV[C@B9߯RIST)4e5&ֽ[ڻAAЖLU:JMA6-dLvF)%@23l,2x(l胙|%B+"gGQNVL›FW 7hӑt0AC΁Bn\IEmDmN1 v.!_.3zJȮ5w(97[`ʜ KpSd2U\;``6թtь"6b! 
3x7.J[U#Z=IԖ5񛽀f7AJ%eHu?B־Q"~NIRliCBux7.h]**#A|*[}ng,;f;c" }Tkj}B1 U݅V3mgF%F{3|\~L hl5]ms6+S7cupr*|TJdI;#y\?38/ԀCÑZE6@w?}b{K1"pg{+A mzz8ɽCOzޡI]wh;rջFp hgh\D!gLvjs&6쳳u]18k5QMLy_Yzܴ^V7O_oC!Q"e14Al!FSWR48orS$xa.`.!#< ݾCkL&~zbCgrn!~sÏ#ޟ=J[4Dp\"|zp-#SL*DG6\sxrY.,uF)/)4).0cm5ASe" -$30vP'Ʈ?x(#ʺQ@EA{h~|˹[ZN|}wi_}SeS]lCƪfw(˲$[A,`N~E}T#%)2S!jU)CUP{U\MÞ"\(J"qudp=QҋxڋXɄ(\ !:Cǻ[ׄ,% #u(m#UU!vfp~QYy:xUBKͨ+H$h&"H93` s4؍f!~<'do-gmpnC;(H((nǨjEܓjCDՆlX=fIh ^S1 u@,v}jmȒ ڷ! ,MaC@ߘBݜ@zKB`4Y@ - V 1WpQxUuwM Z14c<H%Ou*+ኰi\*iYD]h2._ FԦǻDR8 *:HgxZi`ywrTe3mC[;jYlioe\{[5@cT:`ǔ$)=! {G A;{G "} ٝlĶǞ3 oUV{1L)CTn!, 2\m;3ۘw*6mʸMz)әrmhuFm==1>} mzo61w*pjIFCBJUL&Y>=w)nejA坷~E6 BÀ^G/~m{ @S6*Ǽ9kv[O﬷DG'&f47zL3@fqnvV!u U 2AiaNS{3;vLqa"@a"-(J epnxx%sܗFKi`3^rUC*]:H Go!pg<%L Ǒh(oa"Da3lK->ߒP1Xf/ b8G˺Ӭ36Yzy{v|ߝ?UI# >jdzUY|߃B>eՔ&,',+5FV{DWWZ~b/p]$Q;i2S>z,2B6K;.5:oKmDjf猰RKT0+a d"#:T-4D=ԗtA\ӮEGuHt2+ X'hXSCRIpPAH[+Eߡ G_w{QKp]t塵YCV ՠX˟;cߡ%"NzAWԩ:D+8FQ%;ؑd>r5Q;:˸!JVLɞ;90x\۵ El)iNRԺgIwڶ t&*.VE'rd*ZV] Y B.7CTKOyN<@j@R Q;5ZŐÛy_/I5MWZ;|`>X)-J걲>aYbtD\6^q@~2ȓQ=NUx_#!_Qq5u_{P `6IrmH_5l m*}^乙_'-5rX1 !z# 3WÔۇA3'g" -3S7X}']05j~w8 ]ؒKg C JBT3Y_TcL*t&`1o43(O# $J*(oVd/F_><<$B"{|윝# U.?O!SG>S|lR^w+追vNE1 <ӻ?Efs&WfqsGa&q=xpp6YuxWخ1[ 2S)jlɚ!z)hXg~ g✅1tax^M]va8. f)r>)P]v)5h AKqn| 9a-pw u;g1.f,y_;KyQjdmr 3@nFE%'-Own;c=8I iA;7*K'BJXB`|_s݈>مλ;_}Dr1(MeƦH\\_^"?u_ 'C;5H}ῂwjU΂Ѽ:{`.qku?N0 -^z~(*qn V_-:J`nPSb%Rb b+v򑝗 aP1ri2\dnSIXD㭗qH_/ɰ @R S$$RL}F>HPddW&WC|K ۫b4Q:sbُ;[>svϫxNZid(4ʁȰiUs-F:j4U3O2ֳ{|ot>C`8 6lh-$B 9eF /\O+S0H$j-@q%4D%+}).+V!Sq, 1+ ujΗLVE\%q\UrQ_%7Gy3:e~<{;Gȁqm앏,/ä-<]hLyF.ϿMMeInղ$d&)"o¯G.݇$i(Cm3}vnB訨 G;9٬ǥ1ʅZf;elo/ ML*ʟ׿tXէMX˘ႫS` ,}&q_* 2Jq!{.jg)S׼ [C<^{;SOJ7NN1p׉+dh@ˍW&hl]+.W#Tꅟт!{.H1B҈ܽV^j#@QʣxSȘE«l@?V&yd5f=&ksRoI-|eM8ig Ёwu(aFP\O??γ`y[ԮW<3IޒIeL;?Ëh/=nHU:G'`Ќ(Jui &By_Zdkk3P5M%7x0K/j;I|_9Laq#yw+mn8/+^hCUZ|q5P{ 89.R)ާ{PYܫ#xW!!j"K91Q 1[.a"O)O.7T_GYT $sC%B(٫1Y_4) )`%!zC4H" ;bhBj`)*t aa,EVc5ahV'!% =ZbN劉BKҘܑoJtx HTZ##Phe/d1LS=+MFFdE?&82cPR5{Pf! ;6uuJC.O 4@Z#^7!}SҢ4wD#nd{H0Y\ T.[yѼ+M[lܚcٶZw؈g7ͮS#^-*~kfc#^-}-L[[cJD{|ԯZj;î"Lc?dZܙe7?"6+{2޲XJ^#ʔ@-@[h8T^;7k3pQ,jDol}w AaT`X(N( v,l2تG$3Y y3Do],lt{M:iKw<Э:OfGoo #۹PWQMz{3%Y]c˲y/سٰ!I^3͸#. N' ӱ&9{gvz=֠9ioDɀNx:FBRvэ/:`}o՞AK }%H <Z# z?LJ+)/†5);§ :q(uM|伯s@ߔ0!ҲC{uYsGo&Rt;5'%9635'|=:%ss{鿜Ly|R|oGߌ2་1Z8h <_{R~RQKW͢_ 5L1 ͙+X l? &ǯ~9 BMy,,zj)%GeEP3Q^{XBʂ`2Wv[ʥhiTSjt2'@X=yYx&`+J[4ܢVzZ8#$Hbk (`((wR0c G}8G 8.fT .K̈́ AؙG5؀7* FNއ4trc0-Hh!B,gd-mlP,YqJ*h[g &-G?{~}+z7hO@,0Osayu|wd?96?5%sI ~oI4$A8?(e57a 1|aa\VRA ˂]'206 +WYE%\B2Bw!%i~xHR!e0g5*9(S֋0V[]wSA(74Atetd 4mPS@Dm ^Iz"T[xq\sSM'xIpUWϥ \s5\\ eڝrү>7 ׄ Iem#o~ށ%y-D_WNk,)c#P:ǨCGWf4N2g\l CX¨-`b K?!ϫxK7_b ;G7YJj#@%H\rր/}hx)k2S @jRJ*:yu.ɡ??IkIar5~Gj0uw  !JVn>}z}?GGme I`Mhʯ4M bUAZE {.?w tY=Yi!Zh.]Au {sQƩ9_$]ՠ L=7uGed<*j1d2jU$Y oXǐs"N2oz=Wg;  fZ$Pyw? ?73G)Hz>\xo_/$Zp"hisih=o5ɬ\ex.Z;̻vhpkgZE#6 2VaA48(jtJ1ifSMR-qvƁ/ *ԦJ /< 8,SHթP#xĈ06a9C Kɤ#`ϝ@qoSAQ'#C1(`v Lu@&SQĆ?ݿ.s13q^&v7W&{ ~4{fhƯF|WɳL<"Ocǩ7 (BJ@}idX5` }&{a=WF ؤub01mPԆiI/UWRVJTR qD湕@r)qY.4Cp]I8' A.OU)$CP»VEMvߤ\g\]^7fVK=i&kG=uA G(PXca AyY˙\ʁ6|l(ȊV6&[@> D̂ڡq4#vY(ȢK#E 3w..;Y;Q{d:q1 m$@ 4{EvNGqJ%Uxق,Hob@ .!hcmJ {I <:G9Bh|(m*\*ONr4C3:Ab8D^a@ODn,g`E2"Ú04 @PRcK#=wYG {Dew͟Eな|-C2bJ_uK=8ii ڲLĐ  mT]Z-Ξf 2T 1Ƥyw'Xsqk%WfZ@EUTPJf嫅% 85= mS46:3!^3뻏2]Y5z A>O=W|6W573ΜWo-B[ey2b*1o|WaXtż46/Xf]4imձOQ[[qmȭ+:s "n[;裍zs6=,I36=6՝3p=jQG߫=n}+WvWHzG4P O1h6cʌ&+.A%yC,h=L Q EKN<5dB"gk"J?{m k_R KhJ.iJ8vq `%W0΅C*%g1F/AV[ W}ZCC['*4ɨOMdz (n}*ξ,^.:AC ɖ_)u #^/(:bsUBw`̽ATS̓a0Ϸ;J%̞9jXTK%ޏcSY> XiOҟ Ӈ+_Og:Q>~)rgWae*ZhyOUJ ]8fF01b$Ԯ.|4$L,\7|? 
-_# e8P'{7%d`^aY.4LJ4;ŵHn5/ ]loF]'ouGtݓ7Rٞ^EGP֝{V@v֊ehW~VQE6-Q2g2TJη u8"Rɛa-zv=l@[sd Љ1B:{ Y\&KV+TF(_ok̍źl&717Z+lMl-bko5[=sL[dBZL5 Ӓ@8)aIXfLW*c(~ThI9so"9ծp1YIb8z)0br͐!UKij AEP$!T"j h}Rsۣj}+bYύNs#]+M&) <0zg(|Yo,do$SqդMX_D10&3B$7A&ӹ :ͺ߉'m"ݼcX  )*zdͼ!U0 , X Lp1C0T׺ [PXVcXLHx1;{!(]Tî$PEȘJQ׽ kidWސl i,"hIcPfIv._U%uVә?1I}Lޔ:[s&dtbf*DVf_+``:J!jXJ{͞>{%$Ș,C44F aΉligv*.,9IxA[Dͯض46pIo $nxIJ*Z⑮l c?/؆vz6A`M˙=`e9oL )YM 5v!n|9% ԉA'Ac-TeВkAD;Ma_xLY)A )w9IQECSTyF#_x-X%_^,,+8e9ڜltF5Vi& ^ ݫhL$jċuqY߈_XS;~.(̈zc 2H/\@Ua\7f _ш,QX0V%-͠+[nۖG]f3ߙ״"d{ե[4KT"2u2cź52wᘚQ rF3(& ȑl;¨YJy2Ꝉ<"'w'n1YIYCEu"񓏶ewE[%<1FWy7oyކރ\PjYMyꊳdS#'Vgk(}H۵-g+tul}nE)=31M1! v\7򤭜+aRM\ҍ^:LR+`m6Fb6g%53TЌHA1t$\Jk橠N0ȭ׀XNQ"ȣv%Մ#5BKJ!qa\k^mV-b6pc '1LPBy9K|h) 6m Ɖ0*S('ZHb!37:l j|Xc?A2.;muu;!|}u;];غ%cV:щ2PB(!y3q\3`I4je|nkriHעKU{TU"-dQ@"sވu nU)m}X~^sC=X.-" (L+xW+C/*{W"9/5w FeA (!Av9Y цOlnߗS{MH-i"LPgny0xG']S3GEXgo*|07RTF \2, نw;{/#Y9|j=y Ҍ 7:)3\RG 1'%$.⨘C &X#_kfV /><'["V;+lsEK1D6NѴ:"$X Mng i+V(dyPd|d0>5WL/O܅HdסgC"7H^ivL71i~ּqH%("!(}a mD`M>@ \:1TkPY$. 8`x/1R<υL BJtrA19؀0tIQQO=K- T VA "{C`r㞠v&y"ձ*JͭZTKRD{< :ȅ#0mhFrZ8SՒ1d?t]mxǓO_jEWrhgk'aɜ"Ƞ']<8rqGז]6 *@JBu5mL**nWli%`#u6! -PBJe,Sts=)R';vn ?jkPh&"ƽ;,Qt`_ܘAL^}ɻg*cp5{K| u3yJ^_y߭>4m\fŁL[D(ئ8a]RJXNzڗ̇u9_>ﻄ %Y+~.VDma]l֏B e Y^5 cBdC7361)" ?[qY.:m05{Rr-hֽjkR!ݯmTo,yŝFATk-@)ZD!L=? 丫 +2,5EyC+.5)*en%$`JfIhy;FIqI}<+5Ü^t@V*`fw5sp!%,p#- >5e`%ߞnLd;%ţ; +bv!;Y5*vpݹ4Wcl07{1CY  ŀk:2hdͻshxu FğVuB+_;.,LW`r?U~1~Sʯh*6A]@̈M Iכ-0h; qx[?lԟt;zf5&ta&굓"wsvr8Cw7F ڇ[VQflyn~췡::(ĥ7My1BtyϝBO_(,h z`?y=RIXHvGIMctsiJ"s~mƑ?99pWt}^ 4˜|_TSx[ hYX@s˶r:ȂK)҈(͝Kֱʼ"&Ej璈ۨI{pbǛ-QZ#+ :.Ôpf7D(W޹;" nuRn BVݭ{4~78Qї*n}aP)H nl ..%"5õ3a7i_@xyLM!Ŵ Ba }7<9$X$mu+ׯ5u~b7/%Mz7obS`~dRlĦ8 Jb*)J&uVPrL$JRccQ|}3xK{/<sV}Q{o aMM*|}[bEy,W cgȠŐ|U;DD_ [udzRK.EG]PJ)j!<|vln&JȷXi| +ꋰ ʇ[LJN3cCP$j%Q+T T8K#wfyy]hp;9t籟\^ſ~絹ImTnFݸ看>cE=* -܄EӋH% @kxJyE_{0'wl73ڕչԿ'?>?Ln &#A%׹ ^ݫVlLSTyݗ jw?Z9黓wG:rR'k gE?ѧQ~W~vΎߝpzIT ˗Xaó u8&sAGٛL+::c\ԟ=c>rfԾtݝt/Nv׏z{م;|-ۈ7?g-or;LrzջmȐldw_ٕɏ|̴Un g^ p㦥S=jz E F`]xog{^c=ǎ%z *S)Qo dt0{-Pe.z.7$zvOop餛:{k{nxٽ3Z.|nE`v7{]{>enA^8C̅#30qwa0̹?\x3ހ%yݏOWןO.4ku}u8zl='ɯҿ:o345/en\ȇ^U#kzZVڏ g3hou_LPi2or+-`p8׾쟏I>ºc'`Fpǻ ]ޟF<;7t.-}ahȟW&*67߷9*o1I3(= ;Ī؆GܗP0׳)*™ܗ29tM[l+oS<9Xr.PKXWѰZ|H׈V hѫ4{5Un47dzu`a7m '}~/de{ Dm sRJ-}Bt`+-D,o&B3~I$2NOSc"`(%R;C9%2E0\'0N)p=,`)fI:'ӭq0ϋJDV5/Q3=#gܢ0"Tji|/]!^iHcAE(j20`9 >˿xyg@2UZ0W{(^X5򣽬sRml&'j]B|n%SƉ,$4Z93Օl5R_6g1gTgŲ0ciy"#Ldat66,6 yYJ/47G]@M\YA>>&|Yb_4B+^ ,Pq-7Ɣ눵EJ޹]+ $! w+-w d Us [[4 ͱ3H^X6l{J) uS*9++oq[ZɆivvIcQd_k mG*P~Q,s\Mǎ!9$ObcAFQLIc -G,֐XrŨ(#(x/TβNiuUDZE2f6vLT"6ʂNm aC(46X&XVQ+'z&l)VTp%tAb8_мpT2/\>N)-{$E-= !ؓDKR8Cˁe)!d RcU"-}kɵ;PRzOXJb[Kpe&5}_/ tJS$r+Z) 4( µ̟h B)`$9g 3$"՜ǩCr5&:FNi<`<);){q]laDu4ᮈ\t=Hp6-'/ӚM{)?/U\/\BquJYeꔁ Q͌Y}D sTC"§P3!%e<,YyT͕OTelĩ7G=L"=_VQ* ou#32{5v #ύS H;0,{D:{1,U)LsRH $`zݜT~N*d5IIa&>}w On<}$C9;4\_IRJ^AsY01ȃQC.;u(2(d%S61QBE Gr`pR!ͨ,.KdP"<JZcYg ^cq^kKOJ *i*`*xc7˄b6ݔRvIݍS+5tωPaݸT =.KAzoEz--6QB;ҬfEEjùP*8hV,Ղ@1 0oqxjS+ZD;VV:]GB {,eH+kKRȵau XHYJH(iWKgnjű/{Ǎ)}UfHUޭ6/NH6iFqΌDNQb%ب=xV$ChM5 `˰!CɄGasN468t0Mǹ$|1ٌ43(u1a{Q I c׵ȑ \L#8]wLk# FFb#e%(Z<.?"R ZY:2(&T4hӒ){>Ib hY+\GS<ڡ! 
Ws Q76X ^hoA2F=r]6]pGH(˚\ SņKճʒp; 9`V@M}ifˤZAJ[#&%YxcizQ;rkݺxφ:[0@_>r<";Zݑ;9G=־ZQtޕ|-k}'څX.:kك1[wӵ֌m- $Nj60i%J( s9rVsTtVjX&[<:K1"F;+L#\҇ YDŘ,$ Fk:%-;2U#GBH#Zȭ8?lI0:K&^Λ?9֠>Oݜك1'@i1weَaD"AGPvhUytR|tZV2,sM3{Vf'Vǂi{O-{62Ў%W(V #L*xy8!:T&X{RL@Crv=)lL`QT}F:ϯl :@'Z< -(R fVZ4RYS)s=$% NaşQf0Bqeֻh}4?Xrv5lz}}r\saGR^M9=fz_IK:!p CC$)鄫V0)yt)KB1[~k4VhCc=Q>\ǍO?ĴO_XH<g$: ,G[jxTUycZ?=1`OXX0RX(8B oKrmP.G:m'6':O`ӻ"Fgc"CiBgq[|}Y/߷r0_CLiigؤAh!>hsr1@'De3[^S O Yeu}`n>Mq6Rsp1.#Mo:-I{?P^aJi9oO+rst/E|PI4,A+#E5pY\J!W6(W\5QHao6+Y-wZĺԝ{OC:Z.i_Y<q D: 3 ̓Rҷń~e.BD`#)JV;nA}_kV`}UG{ș:W|J 8{wr!|B c6=LA(G 2Mހ4Ѡ*FAQJ;!yEPb:QP*oa4MTAb^A,!w#c'b<{1gFRkm), PHJQ܋dJzɞ]waŵ̅+5i:<\.԰k]⡱t֕+y`0<4>؅U+W? :G\ 3覣cd3Z36"#dfۉ #oE7ysb}q''t'w~L0"sЅZF?]\,໳O7 7ugE.\Z}퍒[Kq=I+-Rr@!#Ka,w).׎j:A?99{&@pAyr8r`1 71st|w#tfA#̝2G{,3/&~]s{v'[Gw֮XնUNη:ZNoMoI.V퟿usM^D*Zp +Z~yl9.#>] .'WtA*WM^M^_eWw//*2 3{l| YEߢ pp (1 [hs-ElBVlV{Ȕ*)\iead]w)[Bӄ E.,PYTB$I%1vW,@}TImۖ:`f[}IGr+RحL2+lٷ26O}@2#ID@Rg \ii<2iu,gOr;*gUOfU\(*`u嬽Adgpm1>4նع Zɂ TJ u, > dr|gwZ2_@ƠmUsT[f`WJ\zeu]+ 7N':woO7$oWɍLc\fA\m*N&O&Wa61$+5x:п姳B4]gܖz3n@0;f7zEC7GUmwo֟›{\7ufs_oZzҐgtJ+M^n]yP:]Qc{L[7`ںuJPhАgtʊ8Wj 9uʃvST fݺ*n}h3W%*"P~[ P]YߓjI>E=RV@- zޔ!c*G0 pHG/)w4LR A`F r{Q;k13|#Zrs#j^ 4(1!DKtgBZ8XkQ 8f.9BGfCՙrP.E$ S(oK޸+w|6~5qxh{u),'7 T϶+`qXTCozö l%/Ɯ4SrUGex@.JUsuɲ赂u^^H/!#zhnf4)r0fxzdz6(,:b-ԋ#wUL6vIJЕHK;k9}ljSB[ݿ8@{IJB${غ>/y†|xi)WV*o8t1!gN!(NGOذx!/Ĉ3A5łj7UGrjl7joݟէv8sSdB*㍬񗛉 7)۳?w&z2}`l|fய.Χ3h]B݃XʮodZVr};y " )}D<I"%ٻ޶q$W|9rm~XOh;z!l;vGN,;VLY,gUERpe<t8(9 O!q8 P<(qyLX"Ռ9R$m;܅H)}uW H4aG%1x-1FI)޽ qEOH~L2):oWaw p`.^o=ꡚѺ79rVdd3a>%&q熮O>{S3ڡ(B**>2K;7wWNW~Z,#rی9& $L),)r.^ZuU(C]RmFΉPm۟@8iD5x!pQ!`z jO"PAWs a0 c\lFu_YC-$'naD{SDȺ@ce 02< &=0؄ F {.$zmJd9#r&J[*rn}`C xEʌ5'jԌEf*x=d7.tEkm=߯>F\P$H )1Sc2TPQH)QY 0KD' 1">xү[e̸)e%":*LIq&wV2E™{.BrV8-PT8kU4]G5Y]Z}u*zvN>߇=~~ o>?^~Pn̗. oW~HMsnb/V/"v}ǯNGlW?H"D .Ϧ|񯕛yjBQ) w sַ+ezs;y R j{}Do]eڭgRh}5˧Q&gznqI3E 'W#>p (}x҃YC_b:lTJK_q~ x$GWV2Κ߲ >n EXtw&j| -" P.@N(|ԘY[ 2/稅jQ:VTd +%x_XvV݃וv_:JQhi5Z1Lݭ{Gq! ϸۚ1jn;?"R8l53f>ɳ:_n!r1lݻm'}vQVN/+Nz_#vM+u=Gb;"THl XChv :IQCF|xԭ ZPʏ~@^Ii]iä~U@ȁY[R"NsNz #ZԌ s[m&{%2emk1,onlaԍV\V!M[>D$k>6лŰ[+'- [KNw4nGE n/z77- ךHb',=?ex}m, l6EV{l7A`aV"F $u?fYAܮGIeEF̗Zc4jchF)$m9pl(ggJ,T! j*3L 4M @\@T .[s\sntf'Nф1M'Y\j +Jba}BhTnKυmEC-$#Ԝi":e;VAiTYi)5VdVzVtlW26aX)0+-v{ 5ZI[iPl筴ZV,JK(0+ܯƺ9/zғR{'A`4pS:NJY)fAXsR_nKƳӶR¬iaY)[V\4(U+^"2TGYOJ R^N0M׾b:u?}CWԊwN ֓WCaPPZ!膖5MIٿ訅-૽ Sϐ3JUns·'`·eޱ-F ·DcTc1Shu `N9Liʆ4,k=c]}c }Ov(q^E鶴_3N\v旜vh?YuNn_"KB4*f6N޵dB AEv$5$A=ӛ7e½"嶼]7#QagӛӲR~HIw6!bS {>\/ G3uI{}.D@oJT҉TS"0QC 4 %hH+iZA2"v^8굊 5(eOgQi;xcd _7V6ةWm&"߿͝d{_wW:y]fΖd?RKҟ_NRc~HVj KTlq S:miLp[41IZqR:i#,HTX1EDBT5qny'{m5ҽ3 nwX<_hJR)m4LX,5HC^@+!/EEh6=@ nG io.^L7bX=r[x1Ěw`eðAp<ٗ֊}eߨډ/Ml/^k:M#(%|1F:n\ 2(<K&p*􊲒Z{0۝4A!P,2; Ahd0FP Ըb29!qj40Bpa&2sYِ:CzU,~5?mªՏgt>psU:}* .VmbkHĬpnf҆ ۋu2--dFQZAF Vq΁1S|Ѵ0Y0Rjm'6R% 7ɫQB(K$1j1$hkT,cyJ_9lBjk\Gɿw_ƓO_y%Jgrfj\R7~W0! 9K|9}a0/Δvu퉉g T?o|Bt\Ůugׯ~d,qݘſV<<'je\'ϣR JnW=kL y qIٷt~Еvw= ' "EY@Aps4,A뤹Y)bK}-5"8OJy !Ui<簔h'l,[V͆Pq2XIx~hh@ìJD@JYRja2,U JYi)F3i[)0+Ų&|V2JqmFSR%vIaaR ת!ls~8@A(@PNu͜ *d&E΁+Sij)6 YY+y*$A=݄ TڇR*LWIV7B/rޝ/ݭuwF핗lm{֢c }sOw/Οth9ZuSXEh%r|QTqΏw/"<(.D5 "Ma* Լ!c2+!oο8pH e*S=0)|تoM-W3a-_0䶞‘c@noT~2&l+F # ƃ? 
}Mapqъu>.bQC%itK4ct5!Fx'|R)N[tb`8ƃҥSCQ(Crj %ZKRY)EpmuA'Ít^nDfRMn(q<ô("%@y O3ͭ63mt LUMEo]R1JMwҤ{Fȏwv&NH&*wD0ƥֈ R_iqCRJmNbN* fުnST>!@ŲRZ>\VKwuzBwH{ޱRJuWY]BPmW_tժ&Tج Ը,럫 }!LW¦ݖjVKm׷/3 8?{[5<+*թb3IK˽csbz]^}rջO=Mt(A;ޥ5QQl3#'KY9ť~r }m Tvwr:J!\1љ^uwbmP@:<{?slW'(֤ʠƵˍሴí;xyٺwvxn$4Ԟ&jQ}K4=YN9#dH,ly5{NBZ -zGmqRm~,PP6(io91^]*I{#~Tu_W3 N#{H$x 0>Rs퉨Jy ަ~s>?vbRjJH%b?"%<_S¹;?e5C ^=}ס*ͦ㵹^^D7g6}xoWn8^vQAi]v(`дÉiX*\*5x"4܄;C{ˁ5t-:#6w*Q|d&YJVrB=v5y.Z,4B­6g%,bڸ[cURm ahյn)%8uMO'Ry:w'$JPa 7R`%U r1AT̃6AOKyhͱX܄YH 1h %91I*ӡ2.#Iu\v؜; ,68~:^9dc:q1PpM6/*uJC7TBl64ϲLd'm$9W^lyj_fcocc?x !+3E7[ytodE:HIC(*ʈȌ IR9phH("+kS,A\qO<*)~eT T QpSS8%H;RXjd &dHpQգ(|*RMU}UlDRgR \?pu}|B>>5m8J i>: ӵڱn$'J-dG~ṡ$3k7<42Dr,,Q VQ,3˰y?Ȱ/AMhԄ5ĺ#?'Mp_ݏSr|V8 uӠ=q`A7~#Ś! {\s2~ -@u/263ت4NU4Q1f)X!dۑ+1+fpHb=(r-3Wb_ǯOQ,X5 G/;SP(7W8 IkNQ'30M: Uj ^A7o/VĆ3W}/tɊ8YDIBڤbcP(byԲ2͙:LlPBQL{ q{n賚ꢼ8 ݭ$&$a*ZiqTH/a`xMm(Ϙl>Fϧv;M|a 1pyB@n9X¢_˻5^$8RjVcIH1ͨӘP #G2C\!iBy4##b39O1 uȨ]B_2-:K1 0<4)>tP%Dsg49eQG@s*CSeOZ$#ITE,Kcb6"FK$^lPx3\;\P\:\r4QAc2z 8RrrC B{"hD ૰4Ak= INC/eQXS$R PWB=deW,eRp"L%ͼ4qO Dz ѦE{;9R`'H4}Vu)L!aTa~83CV%T>g6]я?73&*;U^gkM"bVNG"ƾO.ʇxJҮX0̔PlV\\,пZ"T|}nPg**A'tFN(49YGΚjb /m'%k7㡖qq:RqDn oUNË_rr"k ' \ ~]j Y s$5ޜ\­6xB=|~s5usWkyCP-,|āKɉz55AhPZ'Ak!]_?oh<*Mv폐ݺgC1| f)}?<|;?k>eYKELnP5A4]v;3xZ0ݵvkڐ7.MdJyvڭ) bDu)OvݚBEO}kG[lآ|nI=^ցS!4EJ_>k7q;hBf9ed;,IdSO0I/KƧ x̼K-XYNj|t޹xd9*; dbWY5BtY:W5AST3w:%ĭ $$9g=qc"$TɛWFy eC"02vgEf2x 4^,VE?gӿMpg~;h-ۄW%AnP>7Wi`zWӮ0)o<εy< Uc֘reU6Fm]i2:r U5v-te=&,L?K0zS_cϲpvbS{'Fzc&tw5nSfHbun0ߢ |¾-WX4-*|e2JXrtoK)LOhHnh"dv92bB eFR+ eLx?Q) ޥa6)WcȩpᵲIaT5v 58'pq ).#"5sFNSM)cnjT'fI#nEOcRe(r&'CPC" !; d9!Z2*'a1Ӝ:W#R.$']6B!Q! VCyz.1pΫ ϰ$HG8|d㏉3}}܃Yܳ;O>cةa|vCNWjyff >r90C@jjH.lDn|`8gE0O?L$o7M\ճ%:@>GiH۠onp@$S5N4g6a$uR'hj ^@7rݫ8sk$cr*Zj@ȗkq0$d(5x-p#b3$=MO0HJ#VH&4=\H.d\Ȏ\xC8RCM.!I,PT!x\cP+_G{ /ۀ(9<7cx:Rq: 8+Qj˧ F)~Uyz_x<|W׃m،#U ^XKFX,A0+37cjf[xD O6Ĉ'b%?kiꜦ" IؘY2;cM@qŁWqﰴՇz$rX=rӯqIΛײַLpr]-Y:oD[c8QbYO=i"p˅{Rrk311R~4J!I%X%2r[%1o)/uKs%weMwĈZ\}}|H+8gc:#>lpڔKϘP_33M8v(fTk0ZiFUp隣uyuޙo 0rNyTP>;Kͧ0{*JU=;陣1&۫_VW]pѼYT5iR;:"v}2'ZRkTi2B9>5jS ΈTi*cY&YJK`}VGLV|'b9\}8%u:_kq\o RYGZcԈCJˆV7)kgw<>c]<dp׊ І?J.*$խݙr/e=p>٢p5 \0>% l"10OfV J}|fVe4WwA0ivg-,%ŝEbt.Q qJ`;<{h]O&o0]bR'B8)jSx\om Hǭg>}<0 Q 0kݴK[3kR]5yXfy^(xW{bZcRmݽ.} nesFnv1]0N6bn] T|U_h;WsgUci8j/zFX?eGVhsl>NףQF k,Hސ#.21*=vNԬqqP5P[ p׬ߎ sWSM*N,:o鱡VExMG$8Iێz*m{t+q+6 WI{a0*HPI~aUXlZD^Ыq$p8; ] J kmFEbwF22\^&8krlHr`e%K6)(G"*@߾`g (McPfP 0Cm?Bmy  p l׬2nu)vPԨM1dKur,$yQT}%9DŽ(-2UF婰sR&$+s% R[$}.{ѭAit(8rI_dqݶQ6.ғU6*Y9sI*V6B5gne̋UF=,P*>F]UQ|3=<+VGVaR6ͳڮj~3sY(@nm`Q@d=J, (u-ig1:Œ~~a$1XXN;Bp~ajﮭ[|G Oֆ|*SV{/jVкGubb݆F'bA=6RwC 9uawq3Zs?s?GäkIw9 2FӖsӶa!W@6ӑP#$O7@$Эs "3&]f*5EʤNn4Uby RG Q*芺֢Kn9$CfJSA+fPJt1^k=hm87|<ОBy7ifbqa•a,պTWdyw~7_t/ݯ'7/Wd[U\$/9 S Z%IU&_`80}<{3oaœCT8v' a ;&Djm Is){mzC-b䉪S*sгO±>$O2]sFW;]}tM]m5If2E)%Ҫ(S!:r!,̰b>5?|b,w/)n=I6~"x@&"w-lmo~г?2Q>O2]J{~x=r.Q#>*7cthP\[9?%avF&h{]'UJx;/ӝo^jֻۭTI!wkoK.&5W`.odu]s]}OQx?g w?Z_ӻr#L@a@m5E[g/\֯5hiLH5NQl L4 fBE6'o]zXd.f frhTay{@9ћwКOw NmwF{3#~A!/͔]"9\U6wc:5av{?>*ZTOD6D/w9x쮿;z ; QC5cZj-3;fw (DŽPkٍWbDY_I&ƞ,B(ittިZw)tp3 Hة\_ $4jt鎝]Gy70 Ӊcs?7|uȿ*Ole>1sԭqC>bގr^Bh{SjOyvZT #oQOf̲ CRvmD+?rYY(%TeLMSk*CIȳa6n}^7#jRvpt1f$`/ğN-p%[@C/㴁(:|F56Jy 6d>;πH>gc #Q ؀FPLY統 ʌ}_ ~D$[q{NSIjb}p )dF[K`u; `ρ,nZP/2UJp(P" 9+YnSç e9UdS09DHRw<qyQ"9h|=/VoAT|8K?=X3Oݏn"y28/~ IyoV-*|6aH,zWGPy-Z @0]v9Ahf!]zTǭJ#f7>,i8 WMq˒h2Qp]BTPjj0)*lygwmEƍ}v| QpkaZctvXdR2 p mȹ&*^<YۙgLi$vz't|8ߏؿ e|^~V:|#wOygMˁj3qwYIܘWa}rAb_TG _濷+]b'^wW1bF%;s1Xo:OB:I|,KMYbjSˇ?O3~2q(Z#FYTZ  +t_ޥoF0DdȌЎGrzGqw% h/ ޷ٙr1z^΄{RPX)Pp`' k;&Cdv?P hu2}svMތL::{I'bh;>tE/cXꑺtMXWI2϶ʪF)r+}VgV:\__VM)bgwU.]Ew_'>}s+XUKZƪ 
Ru>")sqU[gPksb݀!Fߩ_X7h7n2QwX/ink8iА\EctJ11zuƨn2QwX!O?nmhg;:=uJϔkm$GEK&e,յ{6P̥F>#?֒ӽ$[)9S"$V奞`|d0 &̾\9nӧE0| nkԨ|W⦸~ʄWo?xiTDۭ|N*}-~uPI"*,0e̺ rK9Jڎ؝G眓hi^#K'rNSԫ8'6hˇd.WgܦFt{}WlNqF4^6:ntt*QHj^wODm+ʔp7{Q{>:}FT`Frs x+'?T$q{)%u9!A[8R08} Qiܙat.ޣ'_P|Ni?{rՖ|7){*҈ ^><] )k/|a~Yi+he{knk1Nlު٧dzw.i@x2CvyRi@9ahVU^H)<ƚ:o!!ʱ!ٕBaZ_Rho0a'=DͶ_HFF˭Lv6?Rd6?=Z6)YGd;?\uUu?-VY}%/yV<,Ld.ƌ<# !9.ՐY C|x a|͞o捚r sARI(ld;D,3@ZidUEIIS[zj]8ڼײsCBA<$^C^.+/hɴw;,`Q%V|+sRMky4mzqF` &7q(O>%JDi6)V::x+iS7!„ dbao{ mA!d(/tJ!ygBkԤSUCЈsF)ʒ!RI"7YB+{W`E!y^(7T V.@H57*$#%P> $D!]JQ ܐNEH.ԾQ>k87*2koa>Gkm"rKd} "¶QKFÓ2lqkTkv{Xhb$9=V#Y17wQCin5WyM/lsUyOU@Nof{n7lFٶy?޹V@kEjPj{TW7b*fO||=smqTvd}>J&pF/tVv4}mL[HFˌ!+*K%fys BBYa !C/NK4k]@z%7Mr,,UBBbLPU4%-*gj.Ui!U^5a9w;[YfjN3@vXXÍA'^?#=tQ;Z%h6Ɨ8l?ϖyxpHi˦,'L~/^kO<]ns -9TM3ch2ۏjlfONˇ&kOO#m0axq\%Zy%e>&OVxo8e*E7K賹UPSh. KCJ47MSvQ @RHyR{+lXdߥ 9G32L @< l'p@,oQVVHBV9V, 4v/5*'$Pԭ^8QaSy'[gܬ'M%tx<%sD3Vr)YĒ *XCeZ wsQ8I++o."44´CáX,֗i;B#ē$O7Pt=F⍙6{U㑕|k{b"$Q*0x $(=X8P4zpA`)9yRr9ißR4 qI)Ge]6yQ5 QJPJI=Ro.HiD)1?Jx{TK4v!IŅvrFjYhT2(-Ջ%C HUU*EAQ)y!߬v=bP dKli (Gd%Ҟ5IokάIf@x DCeM2& x'XPJ%M2H)P޼O !̉4RpdYzJ A"Jk)HZ-HuR ߧ!Oڊ1KY/?2,a(:ލX[vF H~Ѧb,|i^zDN%sp}Pz*CU!?z8x!0Tfm]3y::zb T3(>|1>lO^yRix,!v@TR@ F.*`ĨUPpYt( :n1`w㣣, 1%jsRVt"D]fcrCͳ<.W}M.M)\|aB$ OICgX/ǡ#W)ɫIXR$Pi$D )b8Y٭:Ad@pn*M)s2ZMVy,GB63sV[ݩI2dDNØ>aX7.B{)3BtaHU8"j(u2CEOT:ٺn8(K߂VQ ~N@ .G.  XMƉcUTrd8Fo2c[5Qa37UA+l~3l=GHMY5+n4_&굙f^zݞ77+HrRWeVMi؈?dUIE*Yhz73/k >~m*?j>6gQٮyL)?ߜՇ=o*j9Ԑsz?~O"gxQ7s`ě$e E D0ߎ YoS8T)\*}礏*-EWnw5*%1ש=ըf TI`ըzըkRz:dhNRmQi ʀ}AbgM* Q0z_u*GNz^*HC*},g r{LχRkGM>"F)93KIxԟxN]R@vc+EڧʷٟaOi AL[p?MzX[,s'[ϵ683Bv'6ZyE:UÈ ,^ںtt Q)- Ap Q@pJA!)p҇1K%v8Yt :nA'6zB Ieih;`;x5쭦;hB0xcuCm:2b:Hj?П-`RpnFju]In+c,;9ö E@Tn2 l;Swx%wy_]h;Yee"-jK/k!UY 83ZHMSeM D+EOʝt |3)<X& e$yU)oW<vJtv_7=$ƃҜ"+-Ry;4&]nUnNYL+ +3.%HX}+"O}J$c|nteDV0v$4^0^3+N&J^, !\ՋZ;ǿs ɩ.{g3>0uRJ>(ێ-m;iZEc9&Cݔu'˝93UoOj؟ޛDo?7Vr˭VvF0;l/{F[R>q4< 2" h {G@!uJ!}`La-nWPZ{)j'ok$U Fg\ʤMq 4$R&p? gLR%ڗs,u1{nuGzzUJei6oY=4CsX$ӋFF+a^{m#"\?8(2!QJ` q}􇢃|TR YC uLIPw6sr % 5q^WcH q% VZQF[\c y=W'ܾ(R)!E QL}pDtv$k2pyUo*w;t73pc?S|S?o~ƽ V Txk`Ԡa[7%W+tϠڒ'̰zzJ%251rɦ*-1pL 5L;\QUj9KZMk-uelz}_ڔnŅ?,w%˃NyM/O()a~:EcV.nt>"!I1?LW><]w ZEk6)p-we¯Unh˃ f׷\=dRɴ\|C#8m (@\+#Q2 20˚;{$rn9rµdPl܆?=Ӻ/ìyƫv9:&ZG^)>G/kW_X=,;W&m>B{8 {CNT6_۲QV[֫VD dհaEƱ2Eɺ*3X~P[6deD 18Bv*-V|aXă KJD4~nVȝL4g܁  [y2 Q$f 8UNpDN@vnQt:i@8E"H%8pi9JALyW#O%Y'Pi) ̩Ho3Om#45eew}8_ eWբHJb}nemٓ`ӽIvhW"Vb`0J9ŏ(H5+AV.wȪ,+ 'RSYiѫ L^ GLY[gisp+R9)K<z6~QȆr6-Xכ=^jNzxpVİw<}p2 ӣ[X@ ʮE737x ѰGpCZ-p"47ĤQn\ nM{>Tcm9oMӁt]p1l`}Na{{ץs9qlO#O~I>ی<=flp. .ndwqN盌s(2tB(A +JlxX+FY3v"FTWɬL| b@=2kg}ށ5W굕s/@JSx+YQny%IJW}<'$a:5$6OU "ƌ:#JG߽Ԍݻd>۞6lV|Y'`cͩܫNB|▘NvX-#][ޱ935\+ DѾdV%P27nm~ɲH"'__W_MGg8}@cqBN:T̳LXsfbwt@XW&(8tX&Cv-.m#wgJVjht35ui6؇|˻ f>?e4h,/f -+Y ..KV6+<2s5>|*œs-rvֵ|,ƪ nukp_yvU6wnhkv'97Ś7swST FWHM$z!vj_Fi)( 0RfbЕVyAT:3r,N4(:tR1I]\$z:d#Ht-<*t: AF2N or8EkΊT20@hvxMlt2x MiڑP6Sz"ɚrqK0Eڿ|υȫgo]Y $yUP̃~j$8My6|)-x!iϷa|ղ]x?~%|!NNz~6瞆`2z~WOKg~|gfwWѹWK]U1VUgI݅ɰQƞ5睯oVߍuuz!|G9bY SUWB3WMq}HӨ\%"ޮ:OgؖY)DD JAҬJOJQY)hfaؖ:,mhғRJ;TSZ)%lpӶRViVjJq! 
c6ƦwޱCVb nmހw'AV6h F$tU?t[ncP]4n̆E؃Y<}vGc!{*7JU3K8K@6?mFHP?R%=<0Ci[`Hj㣃G4M gn gbB8QV{^CctS]rY1=9y#8gíOv8̜py< c{n(*A0ӥv]2Km1It[-n)>ՍZ&pXǔۉGLn \oAT͚COw]M=SY Nz")vD$=$[o}-7aF-bҖ;NϬCܨ:ؙmz^}c vzhF!¶񦊛չ`Rj:Bv:57e-wW3S|9/ P趟U招D/bX9܊EL*ؔE&E*ؕ6Sy@<n{x7^G@y7vڌZz?҈3G`i T褓P~5##=;>AaUJ̬|X#Ȓpit0i0.%6Vϒ~ ;(] `e %0Mi,{ U#XvaG2ଈ8$6$E}S/KAd`5]%x HsQDSIIO\7a wB{f͂X=&Vfӊ5;C$3iMidZ3fPZy)%vS&L%z[L*m d sv2UEf"6q/F-*4YVR UQFy`V(}VR`3PexEZ` uˠ;Z ',ajګc_j D)Vk m=/k{ևf4`hN0\Ua<`QR]8#wqS1Nntkn3}ViA:s}߆ѿքzڼWCjQJ9tG-jFPd~k}lbP]!^veԃACzEZj 1exf VFàUh:SXDGz tih]ul];,ŶyK=TiP$.@۱,͜{GS5jfQk\Mx"A̤S댞rJka >Ew I6rJRfZO]NY-1n O=:z,f^yjEǕ8,XdWnuD6R/RD1w(j$Ok3 !j׃wZ~!S)‘ܫio ?릫w\$FsZϷ\^;ҡF1/3XX5R t ,X+VMiI!{uAWIqi|a3a/Ѵ,VzVUꇝ(4RҬAX)Z)Rb=[egaȉR:4nv.?$IKe3X7Jn4@i"MƙH!9_T+6'%ĺ[<j- Y6TbgW%k[Ws!6Y>a-D[}:c`hX`kJ}(:X#*Y5%Nl$v7_T7 n l2skdCGu|̊m鯲_m;Y!tWw6އ@Ft7| \>/wZI?WBد يܮ\eO9yYIz}kd412=И9㷻6=' =m ^-'XA^_w={O1HW*e,c# `k:\ T{z25uo8}}[ F.MbZ1:T/h}q77^Sl1e\e^e Uٌ)w2 ]BwY͍נE YR:h$!t3'{JR+X`EPI~ʰȡrLJ 0;"?_$FZfюI"_M% Fh+fm>.W-LNf`tEaYYb~K .*gQ D*Iuz¿ߘ1,#.b[ls٩cDdީIScDr0L PKtoUven ) a9=je=7%{\$+Lq/;eRsYGe6NDC46V\.XT =dxU,gd`\y^T1?Cȗ976ECFh#? {4nJ>_ؿsuVxx4q ֒,~ VEqUtWEqUt\=y}f *SԾd]h#h҄-J挝՜:1iܹåo5?Y,kF>}Uh&?z5z {e\ 6}zOe.,ug,z08XFj%K\V1f+I+\<3MP׵-DSvJ<#OmMo 齅 bE)Q65 L*/7eeLeY)e]ms7+,}tU)ۛd|ʖk^0/#);V5fHq83Ctbi4FwM" #!')KH*E5aW;&CPD CMJ{>21FTY,(ʢ ' KpY6T KbRCM)iGےݱ荦QA%W<]+䔔>7ѺQSMc2(A,8e)`pb Ni&U2'bT~<pcӈ$ tJD,S$1`IqHD" 0bj"e,ʘԱ]j@ V.ʐ 'T% (Dy.U`C 5ͺmdÜ+Zp욫-0W!-OfFR(-_$#+\M퉑)An TJ/nSK$k{Fa꫊QG٣#ʙ,vly]C8vS!L?{Sv1ϤH Ē$ eg1,FR ,YF.pg+/gb?@eTkr:芣J]wD)WE5p{ O=cAZOKΞEp[#sh9;*@TD"TM{h 4sTݚF&W'ԘkZʬg_WYLV{Ul {Sᄍ;Of0Yvx{XV7UwWm$}T+y^6xTՎti[7r˃fu` La$M!rvǩ1pvt*B_6ѝ<8Δ* }m4&G`k}ޟit|{\#Xh 9BJc\OSG?hfyZE-?>̢xI dW7? v<->?0⫿ Dx҇-ѱ+5y"aM:f6 WX_]~A"8k[dC[t\ߍb ~̔P"9eYv ZŽhhZjjѴn (:3@8EPͮNtQztѝ?_.v.1ɑbBɅ5'&Wb~l%f*+sO–-^"4ﮊa$R:FLqj Lv4Q݇=+80.+\%#w拁v5GBBr)h7*9=j7_ \DM[^TδpGBBrȔc6#,EH "8STk^ZIDy0Cz(7y3"Dw!T{Ha:BYH"RU<9R(Hs f 0<S8 +_ tEyK .6zк'Fy齦Z߼۟k[`zAgҪ!;ЧɃwaݓ{-F3u8V{ 'p"拁q;O-mO򕋨Ls,4>XM_ \DM[vu_9Q\Dd7Z{MP$Ab":h"E3yU\D+2XϙPpR m"jϋc-ya)K\s[@Q+0˜G}v?tG(TSimfΓWǟ1W$$[>T>fԯWw5&rWu}f:&U2>jS1raUg0_n<@V\j@m-dOQMB#CY2dG8z'}γk-)J&5\ *r @kb ~jFC B$J`VP"h&5b<56(IcT&`CR!~<)9OZEoVu}/-OLĤr_D_ `,:ؔZ:`zq/n oĨT]?jb㩦mԢ||7؃ɣZQtN=zY>9]WwgBs逿rCwᡎtͨfTߜ:&W.bİwEZE`R[xKD9BNw^Z m-47u7Q B!_![Hj3{)'FQł, 2~R2a$(wǽCf(*Qյ KYnaմO҇4߾y٧hݭ^N5{cd>FEd1*[YOvt7-2[ 0Q* ~iRDĥ>7 /vz_m570L GR4>}x)TVCGtj(!AgIM*8N|gN烨IuEJRN!PR*L JaΓKU^'U? |kMK%b'%NIH%ꃒK󱼂.B |:?[|)B9aeKJ>3#JpRJ:t{|}Dfגb2~e@pUڲXlb~2߃b ]?LM}£%yqqx#W۟1dd54O>S>?9~,&Φ*O6K 9O.%  RzJOs,%E?' 
6oU Q:#>-8ۖ,&7x/޽\k-T:-9f6g_yb6|*ΌC^ҁ%[$M ]RF&|hP&$d;x/ˣX()JDwL ã"s܍^;:1'sBw ˸ I0'&ԧOE)qvuPt/GE:T~N4 "YԽ;{Ju[_Vn/ѣY:X-q%~<.ylC= ~ԕB ;z Z:8u@FwYZ b["5&/WKRҠ]NaFRs@QZrchZ0'4uJnq?iL5.g7-Q&Z"@Yhެ>5] 9#%'e.Xy&d@4u)-O^TSܖλP&j˹ITke]KZ R0d] amԷ.2Z}!~bl 78j՚wxz[=( oo#Nٖj0>ٞR kXtWSh=Shs\W~5lY޺F5ƈ ZL@Ӽ6-ֶkF ϗZ-֎u<"M)ȺU.ߧ+$ 6Ȏ m kIz !i\ҜZ`KcVce5zRl d1mRFt&#-ӱ `K1bKv4ő2S4e418adR\DM E*`6wܖlmW簝Z*xz/0S tg]&O(wZDSc8!jaT' 'Jۛ#Σ\R=M-s{ ṋ}h4ЦEi?ٰ™d.>^f[(.^V@Dk7T/&H8&\ E)M( 0FsZ,HHc^֊itqxĵvlgKL"MHR84XJ*4*ReޔJhtZnDD`lcAiF@`H šGES*F"DŽĘb0y`+P\|a Ѯ.>K(Ucs 1hc!x;ȿPNm ܒkqlϴ\_&5l{t!SlÖ+^_Rrٲ|$JVU{hTlY$C`'H ) $ C;1歀;r/4by94vmY֡C…/^}{_#g[Df'OLv@lB*@>}]6o/9WND#-" Cȥ1 ygo*\}=ʫA*UOsDΘ5Ž~?N헛 >LV_#<8W8Y'0`m|Q?9q *h(OuNnh[ߎ3!'箊l_LO/یfizCޛ>7C`\tn)ӝ #DkpUfKbA~^B l,xjv[dY"i<&XZġ'Ev~kX@@ӽc @#[x'&?APATн/b\f>(ZDS#P`g[[?8HƋm ls 9ABE0(cu77ܬ.!nb$d{T:X-ұ6rC2 #-3uwb k<_SOi8I"}miigE ޯ͈T_6RF-i lz?[<yeKK6 2Z>Gw(ߑy] So,R3Ya>F ml8dL$q @A!!AJE<{&t@Ɯ,R" ''t"H.PwtY59Njy._Z#-rd%-RW9ueƤ2Rɘ૞r^.\SU ˖z'B9c q~i'ݽI&R,.-_qGjaH(¶< &И",:V219D 8Ĥ|fSF FIq!R,`"$y4 EDA"CX' #r A6+HTrAcT Y苔T߳dS^(== 9x />MdDp5IeE\6ߕ#,|J#fXa0gĂ`1PD$:$I":br#=+ %s;V~\iT@W41#`c6*HKFƑ6uBQ4KD"X0 TŐq6#EeJ##c0ʅćxH1sR"Eur0#FC0:Hz%1+aRH±kޙ%1# 6>4B*wwJ3,!G+e$Nbw4-`syqsa###h#O"Eu=#E2!hv B$JJ-T{v]2~a* uLOA G*;7^w*;ڨ3lMe-yw@2&;L9S4sQ> Oƽ'uVw: 7^jW~lWW{;"ϩ4+1$5Hi,{M!e^5ĺq^#D^K],- Ge`ح*\oWTp1.2Ϳj ibK7^zXǻ\m샦mlSؗ'3>2K2ͬG

kt.=zU*=qO'F2Uwp%n jN6hYS dgڭy\v!!rm$S jqi7 inM1pmTn"$!37c%knTdzxc# ٩·kS3s[ ~?*(G>G wP*/f rS?Jao>^--xL,ymYy6sX=ۧٷz>"h^;ug?jfaf5qI|3kηoi,, 8" GNGYVa3ҍ-s ɾ u<KQRmB .OvHI!S1,~5"_t-Ҹ" !2/#H&%.vnb.tv4œ( IpcXG BZ` +4P.4ҢF(b=Df&ƫ"3q&'1vOC^^>(&%PV'z":*weŝAK\{9o&uqJ"52+9%w+`Dk}^"W60,֧: ')\h:ve) vyY.R721iڮ_0%H e :V40֚K#ԬKDĊEb`2FqD#Ex%eh_Rh&9@ qD-S @bs$[Ҵ)^Ez{:jB FHM(Q ##! C҈!l<4TLF6 0a|xk^[O1  ^37cm[+]t뿏#I[Ky}Dp]:SÖbiwev2;{7nrk^Tc ZUs&%L?EX>hl vpG\b%mr`F n5)z&݋2JA't;5>OxrпG3 vEa5e\MlmuAFtz;o[Πطxiֳ:mϘ ԞOvlK+#pR'yN#ͅt.of`΍d)罚\n蜹m]Dd̓S-J@(]6ڦU`sڊiu'F &u%cMZEPiF e~x[ti*>ht*JvVё1:eHpZE4QF!)%+ )a"TL$hh$0X6H m5])LFӝ[탲w@?-% BY%(hqW Ub~ Att\@G\|4.SK/0cLBKbߺ0 q=1yn`fҷְ\W9p)3Kgumׇ7rGYM-b5x|uڨ˜6dfW7URic!k6kP Q:\M@MW@@@8Se݆J}NHws&y,B%[1J}Cfxmi8DHq[|f30zj@;_hg-N Fvd3EP^!O\soxt?hqHBBT,UzͬJVQW*޳R'FO7st8GeUpyNvC!!% |;`]=JTPW76) Ҏ.1^26E!2^moRW׺Uz jBBמּέv)Rrϔ]nR] Tn N -uWSuQ䙺nT]2$$ .wvCq0ڇSgdx{w\@ߧ|cG"%JØ(DaB m8ԔiBbehU2I*AY@$NPAYDH(44"4b!`Ʉ&J2FZCLX,ØI@@ ! B $O_Eh$>ZţQVG$\72ӯ_wFڤ;ta6,Y+.Toeygb0<t߀zXkV>&X7~B`.ӿ;>4X"{A={a(R`j:5;>*%304 ^!GqM` to_2"gxj \å͞227Hͽ 'q~ q8|DvԘ,? ދM`h<6]0Yqew1Yu;|8OAvm¡z*II J (QRyُj& zY.xU"Ȃ]/8O 1ײpac-4œ Pq[֬L`P2s[bs[ԍ`< P]/I OP9"P!Y& $jb d6&oVS\LvσVVs`g6ͫXƙV87nWcifIKK1$‘C6[;w\@Iu;Xf[n03fizy6Ϳ#X/ߔ<}fa|̷oj4{M9(ZX>˝ J p>/ |%܃ёM8KO+«A@2ǦE}w/uVHm})KRCJMRUN"ܓWW bn'{/S\ Gܵ\t/% Od\7:v, $ q~w}~_vouBT7AjSͦZ" ag(@ʵL! s]zԫΉŵ sMgb "Sۖ'?d?Y:V]%kvO{n{_Ę}8AF&K<$<.| D}m76 6v#9K 6 l;y.Hl]3숐#O m}ydEO_C &viNzt/uxHʑ͕sEQv1t>BtM;;ӗQ/|};Y]z$~ΌINV/:Wj4+|I[V/6]##1ggo`”V%M@Zq2/|]oӪІ25aݺ[ZOxO_PkwI=i)t$]vB!e ~Y3Z.gw֍-/)ضCvPF؁ x"fhdDžhꍴ}h 87} K+ã^83L>s(_ߍcUUHwIj$ 6gnVmB"GQ,oWl$ o۵ :SG; oN%+\% ;3l1uʟVHrS""=HX'I:5#3R2j鯔.Βi=yxjv&l7%?,'G/0%@yxE(uq@MBp&[FIs]k_ p_{oe.Qc(މ7-^^].$(;Fvl6|]7:&a+5 Kk=ɰ2KՏa1`uE噠4-B͹ia?WQIPh-C1;K0-~?F3S )Sncxɽc;2y?l ?'BLǹL'JJ3\pbH*c(}\OMl]_}z; 1dK=~OE?aF/W|Ȋ_0sܵwﮐ3}B?(- 2=w~ǎg+_={GEcŴ jt-WжxW>lDB_7Jq>r>(CIt&A)3Z%~zwT"(+<ڂ\ 3yr8PWyTVzXiUj)`yV J>MV J +D+EgXTeRVJ I^9Pq~bNƌwq#5*]Rb/4+uQbO]*(_SuVs3p"^ IwyZ=)o'?/Q?pDإ+/!hEwULEretN\MnlqjnFz=vѶ5_ZZhHiTOnoǰ"zchU*w0fƽbo2U:9}]dtJ'5$w&f㷏S^ilGݳ^ͽy5cB7CE';@3x&zrWaz[Ȣ `V/@gҳ;8ǎIbufp*$< ~XAVԇ]h7#@|Ǹ 0'8]:aLfO@\v̙=sB#<"&LInt *b%fR4ڳ35&SSKfb|u,ogI8Ζ,Hfl~g7boޔ-vd11dR-Q8&eB@Qb"ka%B$]j9C5hMՕVViC#k &~uh]E긿lkְ*>eF jYڸZ3_>'zj,_dO\f=V7O]`gԚ,}&}n@AwH+/:J *v!kؾ0!o^] Om(4)mjxj0\kf 8RLڤK2 "땚hH^Kwwe.:QLHTR0zfeݦTSZ+LPNq4}FhY`'*^ A(Ld$k|)Lm&>6s!!izZh(LpѳFg峞sZ!6^rR^i gzZڮ `m~s|(1JR>mPe[)i?+%mvy AX!U|E[f^E^sK^M xgT 4=S J]VσuX~'G]Ipk9CJ:7@q{+Sp8}*?zc-,(D, (d酨sM"N QʄS |K[f^oC*IWpz=qIH,$`ҙ^puvz6bRD`}cbPW@z$}MD=DY^N ju^6v]p?ֆ~;w?ְS l?>NXE;z= T9R0iM[!`IuUtZqI:Ԙ't)|+֊}W)ks6FW( )mˑD`q$X(Zbn<$c.Iz5pS41KBEFVi'lb-ovZ-Ѳ] j  +Q3S6ڸEtvw{PAzANk <#:nXawlU=Wkc.I16юz\v6I۟TWJzPväDGܸfٲepJ}+3%pB> DʃW;#<=qY.6ƴM5p0Pbo?[1Xs_ wcMadI*bH}=.,4m4oi8ӼqyS62 `Y̑ D&fQ#< %<0$aBISM)״FrMXf51y;=N%ae!֚Kb` I Q{:MLb,%2S$qb=HPmF8|t%飻cO}:|j:_Q{QBP5r(Lj:x ǫ HO;x7pHDOQB>ZƗ靀.%-?pd|v 8=ƞh(&6³b +[>F[ş~g7"oEޔ-֔I,2F[N]YHQ$3˙Id"ύ2?7mQi'1})ox(jA 53#k#Tjv UN#kamXFY&H4i.8*1)lF,@ 3XWcUnCLU@@WwWUqKqq)nW~m%3N S.[8˗^.rZ`Om9 & &\ybEa S0*ԒaYaBO9.Dg<w1y5 kp-Z2|;V‚z(/<ӎ#3Q̈́v6^ na?"Z]AE +63;=|^~"8[k?.e_}8 `~Ó.]1<5EFhu+~y5[Y9Sd}^c OP⩊ˣLģ>ŗ(>8b-z$#>ٛxbDF\Q饛M+bUGFt=;^\#իiZiuԩrZieRO 3 SD4w;$kP7öoyanO-"7Jǻ C}."#Y݉D\}g) IJ.lS"lv̡DkQft$N_eHpv89 BS\YW硭r0]hBYCb |rƿVmў pREwLw$Ѵo,劒گ~IĦG>_cSo 3sÅa5ONNӫѧ'm:}x񝇩}|<[IW4cF? 
h,& ^d,B5M^z#M#BB(GA@AW#H\Ph.c$Z..σ]<vyr5X^ Rz1Hj$D3B摑Lo4Җd>(\M7#Q9FvN1x k #*IerUT:엢aEJ&x) 9darg A(Tfc{ǔfY+9B̭+ne`6XPbv_ѬiO7wm b/DHz#\ ݶt=V`bZ!R8GSc{C ryu8vgw` /W ]3K9xryG dR(uh.nÍn.- ) VWܕƼ@P u hRjaqUY6f6`kK ,sȂJLF̗-idHR-X֕FowLĬa/!:,MXbVG]jPNd g\y_m%Pʄ5D9e"`/ A J# %'aX3,_/ďזCi #Btŭ$d(%5KtK8neaWNt5vT^̋-$uhm+_MMHoZ./0mZx{痧)P<>t] ۸xתۯ[璀C!<+)JW]޾xƐdǏ7diA:TU3Zo("o]WL 8pٵGn_U5eNbkӷ@֥pՆ)T0?D L$m>($z`xN |KY:f4EEgs$ͳA5(9p%𔽄ؒmċL& C&\(k\> bQKa]UT{oF.lh ~E""srZM=-ïܥ7e& /Zm&Qe;-VT^-.䀯̔6Y$d+sw"xY]y0|7avfއy~yJ" KSS;ON¼^hm5<ݍd,JQl #Q8򴰥0DZ i7XѺzʭ7:1m`A>e]GI$c GT .DJ{3{ ɑ`ҲZZ~Id-{Dt2qAhWBR &3'o|T_‚} B-5 w@-I\q˼>`yj'뇓3'Y>_j6vAc_-O r$9f\G"jQ!pgwT;Iex> PfU͠8.w w/f'O^Xl*')Dۋ;{.q/Ϗm-+5KM'*B!4Q1*ptwx#ünixY4t}?HyBJʗ EũWkZt,%ÖH%,2T8kxs7I$gǕ\GWⱪ)?!ѪYrUlJQTp ss?9qh[8Qxy=ËƵRWj劗RYC m)d $!b+%s9\10[V7EDdAk\vmtHcQh;Ռ Ռa5#-.3T,:7iD5=TPhΥk=qMUPTo6009R]/?'۳S=SRӺ+;S$o^ѝ٭Nk>XD<ƽŮMIМ{\:r8%cuۚ狊ΎAwMT I}bTp׌z`LTPC>+( _ѣl~AKrk_땮ԌTr:|bg)Yi/a8\["Q&ZzAﰱo=_<.f4nc(wwN (k] d;?:s&7q+0 ьCB ,"d*Cp t#a#1 7ՊТv|u`8Ojtq<+5W_Lzcs+ fS{6a=pCf?6t,E#h|4%dSB>;jWz^u^eDje% y&eS'oy7)z\ RL'Mېq6ͻ$z6,䝛6Ej|R)sǤ :@vTi#! ~q[vK.]r̀щ6T&])JY1.<{D] dI`Lj)ߕZ7N"{1f"Nz,9Ibbr,T׺G$c{Ś_>3f{gNxuNftuiM8U3`TbcHIѣ?Riz*tEI |e%4V|\lPBO)N!Gli]ןkpq"='׍&%ס^tY%GP$rZz(k)8}pz]5J?ǬovŒ,h!3!:D:`px ՙYKҝʷl^0 [ F,o)Jʅ"m~[XtLX< .߻_?SJJZ-TNtRO'E%+gwh%)HAK_:f$9%G1F79`Pګ5NEcTŢ^}*GӕLVf=V#;*7V\ hg[-|&Gk~1pVV VEp^SܮFj9jJ'w=艦+9\mՂ+}Q^b9#(u (*Wh-wq;Jvq45jxsXtV/&1c&WK>q?)R=S$xuv6{vNNg\^]6~ǃ&kr0A5O//mRt4VY!&{1]Fq^{bq4W5O$ZO,eh$<-]i{pio5iaC潌i{h9;WgYu>=,wEBtD<8|e>XPDviqDbY_Nyq2,qN2ƻӪ(eNg0r9LCiM 1mNoz&`2:𤊶GqH6qA9~_hHzd 7i3ǒ0.2V3=Z1Q [Lce2ZlcF'hF<*тNf;r)#kQ`+N豉q+4SeuvHWH1ڦ-fEt4 3W\,0!޲,87]x>Y e$'FU@\I!Qa (uN9>b{T8i.D֠ȑ*0s#$EĔ2r8؜ %IvxI_~ã3tO"a3^ 9 8lz1 Ls,*HRal`/b]\oүH":D?@(LMbׯgrsos hζwj}vB(i?+Q"-piN)c-Nτ (4o$ifims%ibH+LBOHzUVXf\N9fi?|z?%)?CNCC拫K_,}ܣ}h=ayG@DV0%D2%5XZęL QIsBc,N¿R1uhПjzЯgww`{VRvY(My1`*f~5׿YϜ-?衡e?xKWGFRilǝHvbN5Ѧc:(k><>ݴru-PO?oϢCo.kknFŧM͐ j_lg*[W./IpYmH*Nj*}$%"\Hv\3l< A]#,X:5m BH$cVa AE*JBPs4҇DY%m?&7!^&0' _rpa;˧WqwY`0:,÷99p 2\ B0vru py3%X3KZ/ԻPzL9h-퇅b(Nc" uXOq12q;N1M_ w~鯳 qp1⎿X%>_>y@[-(H矗jn.?DK ?#%ņ*J,w:TU, T?\wWg)J*fis&}YTgnp`75N.J Cx.^ <)O7,:QWe?ETn/Txf+4s^' <"ȿkP+Th@V% ( Y=θxBG!SIndWmX!EOJKtc8x/ ℠/ .\a5eO!^n@ԌipTODn|ٍr>&$߻|ąJr 򬌟/×.B. 
c>|>XYɇ,O ]aԈQ5)o ~p&|_]Mn>\ S:Zr[ΕͿ]% ~v˳,.{)5t@+1VSF)$ˤ+}qGQ"4QVr-4[؈U9 -vzql.0OCyQ _nGbTKbx]M٪鳜qVR٪)iotz)?FNR F;_RC 7LT:T:볆pIrjjfݧYPsSE5IϬ`c57;䥻_:a5D`p%5rR/a_z0v%j y{pY``4r8'ޱ̪k?1]FF'S9x[ 4aT[ 6uiP߆)&ƭW*%dbog8w;$Yc ظ'qSFo)s猣ڸ3wT*(/z4c-A鯇 $hҪ8MܐFҽ۔.&Ch~ٯ50_%4䉜ZDlNdN Hsd#2+jk*ňb"I&HQ 1 ddh .y&r ʶzIZ4mzBeZtRLwſ4ק1' WM'ԧ&Es72T5["9+Imz9eKNK 0+ºzU1_Ok3ط8C!P&pfj$>?!(:e|h6p5=?;cf>RsZxpjLT&1mJIhyAя.~͋(]UQFGV l܅10^>+VK%߮6bvՂ:9d>{2aowK` CΙ]ս u1lZ #F~)Que?V&_*]]hUZC1XjNlmLyzH@NRujGEa9!ȳS@i H/1bg HMLK\4_vϬv?_K̿`–<.yt.1KF=CoA,ejaCL~RLJ|hz .M(yIYp,pQ@3KoMҌjN(R+aT ';o "%K $m2{MPiH ^H"z۰!VȁZ߻MP1\ z#i0//p^D㟥$q^Q̩% MM)N~n>iFvKkӥwkRֆ|&Ħ'o{7Yyޭ)%Ӵ:ktwԻa!_lS@ht5]_> fS>v:z;QHTl$Е \T rBgWOpp{?_g90ov"D?trotBNntV(+ u%B+-d9e'԰/GFJýu5HkiO>4Ԙ Zc"Vwz}f"1KAuQ8|$oyz c8q28ߝQ2FW=c9̳/1ֆ+l 'Uěc]#`?bD}8} uZF}J|v+j5XST9Na|y99(Iy/S3rq 7JE^G.a2%'&=#lҌ~XT pN2Xk#꛹;X|`M{A|Q(Fbo놰PX9\މUo UUy^0 h OM]"2j@| h:MZU= '28FXQRT%&-2HDi u"0,5p^M%u x(7U(v°,Be, 79KӤUR?R8~Zܳ)¼Y a j\nh,Dw]tt/kSH)H Tc\ĊJD9/D;5!Һ7f]=m<0K0ˊo0B9%m;xaEe XzYέ-[|E{ӖGnN[,ݻw F^[^':,h%AE?vEnȾow¾?)5xC ZK*M5(;`wskB ZKP@^; =$R 咐DR^ᯬƒ$R,A~6꽁PH֝- *)8c1!Gݳ!+qk6?`H 8"e=e I&fXG'a?\jTPv mV')% 1&| 9-FQ pSs_`4.XMH- p}ԃfHD5nSj#$)%qJeo-֭ U4H/oݐN ^EQgBZ%pB;] UtON1 rjñ\'Tuřk7ӢnJr0NWfaVX[wwZdn5zsPv(l9܌~|5n-zl$#!>/)zOG+5[l÷?L >-7Q2Ϸ D$oc5;D'n+sW(\FbVD!JO'*5.`vi.vNalE6ȢQ{r&ɾUZ KEᇡRrloehocw Ape_Nj mG1n3K?Uy|C 1&9Hij@$1Zqd(&55 iB@.PVj3^MKNNݙyHg1uZ@Defr5$`ȱe55qm>5PR$" P7,o THq̒i`U 'P`tJScj al7bXo@^\M$)Ђ#N`(QDNQ'b_1㢸b_XuShȄFVnI%jT%` @+12RSwL 0b`T6GEZ$%ڿxa8{E}Bdj({ F=m cVoGK4%?G$<׼WrO!4jaV~2\.ޠm|Cc[cLgI<3yx |ݕƓG7mxCƵݙRC7'ёNuMOh -/7F};:C-,˔|*VyЃ|e d賵77{2kcTHIŻw9L&޺LWcr$m#(-&uh͂%{'+{1nK A!stUp1wnN[sOs+#,݂. kk[V "`\sfpV{wlmvAv+0{ܵ蝹ݫI"B WlU_8wR)G2Prz}wF SPw Hj]e}>;E_xN(hlG[]Xul7S N F"UL-3uݙ~v:Ԩx&qb(GibDSm}G9VZ3 PknM1+ J£XŢY:<ɣҿ-w I˽ޅtB^K >7gZPv Jm,Vi2:->!ݾT|N ئM~XS5Y I)5z,BIID 7]A$VW͚?VX7{OEa 0îC^L޼XZ.'_{Oԅ J u+M R &PZZ93،PAgU%9(ixBR칌WBݣjs մ@I|?1FQnՅٹ b;$&NXm'sLI3;bn ܓʂ{sfcPج1߼uxڼDm嵶(?cZYnk!'LpYLg㳘MW~\廎u l<?Pu1MDcd\YK`2;6Na,d7'nY,]j|/Q/2ܬo{SB0&'-e.V.7&|0:b<9ɘe7Y|lѭk`_;T*-p8`HG!3/S9G+S+& e&VšJHxJ8rB %q dJte`*օfEbʁ<^؝EB0dJ _q% +(F"IpXҘAL2$!J 9g{kNr~1X[[݌S-WILfo퉉=A$-R4I"l mxB\n`!3S¤PT .99O5F:IIe{;~ցB뵽Đ!oc4@IC$XsWXHiHU4?'b!0r@ .5%柧]9v#]L0[Sh VˇmuƔ@BwW<`I~ӛ%0BF}sebZ3);,E=z\ Ch/W]#ϣc&Å?OX `h:V<$Q1n"{ Uvs탲)Tqi#ojgSGxh"z]xiV`ϥ'cD8%ʢs9xvb=x{]GgcB! ~蝥y8몼1rpalWlC#$v)Ђk81H35q"¸ ߊmގj3^OK;L.jY<*3xA@D)s] ([W>[fwLFKkov}*Yj٧l}5c"^;_!y./qEgi:`;'e[doi?Tуtuk{wֳlj˶b֖u}XG㐎Hp Jc0eE;;9`HMʫ.D_} AnEE$Rx$i2M)ZI )JXH!XBb0/ k[: ڙ ^u7j&9tIS0%nkg)M86ؒ\2mXY햢ImaN(vHX%0 !-3(1T);+S%E 4 TR ь$CaDz*D ڎy[yyBe;rw8ġ1&ˡ+>t V|Եs|CKK}/5r4ʵTxjK.`Kȝ:;M\ɰSP/Z5{.;Ƿ}XǔFY@X+X\U:jps;XQ[OGWC;ͻ~&g2_Bs;j1Z [({w/[!XY'%SY%+x꺲NzγNhsr%p<<~GsYu'-9r{EzH[K:1&a p!qPiu;l74Ϡ#i” , M{ FBRO.!$ y1#h@O "'wR3>`OKqk Ӳbꧥ\\_%vr2-=& hROb3_Zj멀b-s]1 XSm:M'Qjjuk5NZU\jZFHIuyYqw=ఠjT,WH ؎6S]v.'Ww`>?]$D޼Fr9ܷ݊$tZ uyGdZ-§=w%%u%aC@rї$5.El 06YFFH'*hx0l!ʮ6@ # ?[?{F俊Y?ܾ 1Nd2KdՒ,ՒbG-vX,~)AB*B'}@5R{fW6zOcon6L|LOѬ(>LS fYpKH7)RsI}?~7ov4uRȚ N0}&D~ߔ[V_~19Sn~?oJ=5 7VVf]_^U_ҡy!]Oy~rPRauѶhUX⾐^"*5xLFtC0QB$9Kn(fgfBd5"'5Tfyk%'l@ߺKb##P2RHF' 8P )OyR DwYx' (xV0xNn ؅iؕ~KA^bt$`獛f#;槳<%*w'=zWQ5}I%e^#uh OafK.i=^T,JR[tU(&uiB`>`J=v"IP~q6KHucu}_tq6D תeW㯋!kvݿ/BX,? 
>x*p҆C!,47[#KG +/u`":KWjW۾ZۇBb~Anߥw),cAY.30-vfog4qcΟYoWRׇ۷Ѡ˂A⟳y44ΥC <~ Eo;/sĭUj?ӷ>'Zp0~`Y&@,t(n7O Ei'HVQCenWsУq>:pnomjhA~oE\H!nx@PŚw.UU uQ;3/knD~y8s /`yRj~ KW:*.HC^9s,X]%EI]%ђS.-uD=Q>Z &!BRJ ڃ hFi Q!9VU=hD)wYIU%3\͖HU(9qMa7G9]@%`Y[1ך d^b^K y}U>kQ$rPj┝yܗy%z}Q}| Z!+o0*ąˠ)q THς&_q ^U(9pB;OdФćB6(NFz"\RR!e/T:n-җlSIK-%!b}E=G7.-4 Dp4,WknP05\nǰZ7Z)&zkb4T=\]ur)X[d]l ?sL}y:L/A>_#1'|4} ׄϕL@ÇO_WqUc%CrҦ7)"J1եTUefi U ȌVz!MےCY|ۈܗjqt[.TRU#VL㩕 !T(E)-BNeC =N)GU P^482dDo:]߷""({{ ލr-6ϗ3/N@1`DL`~1>w h&R(a ͩkF%_1f-|Cg2 Z@Ӈx7g5鿯j8U>={wp_sP2KҙWw/Em.k9*n.޼f}=ɷrr>SU;LCpNL|OMv>F!\{  ϹmM"' ӳ?bo*F&-Ƅڲd ̞]^DK7(sL@>瓋N+([&w19Lt*-N8DC;m LWKE>AӰ$}j; 3̔duP2Zڀ XR * OⴅCk&vvdQ{3EBLM."IxdcB";+c+l$K"*&@|y-R&ipk-؁ZH<8L 1D,=h*!$b+bAL9%x0RrdɢeK"=])DS(pQ(B32 B~I10V"ј\:=+QXD7y0Q1ػFndW lK$;ؗn oY&q[$ŖldI&Vw݇qVTXEaF} 8z}<3 GhRÉL&4r5 KUD("~Q Q=4 $F$R}*S'RB0Jwvt_ l񤼟L9(Ǣy319%cT򊠕$-1"@caazr $d0/$MTkbʹ G{'܊6ϢL$$Y$$Ydi9o~rC BTbpcӠw^A#L@nsD׿=z&F~޳σP#[HjzM܌Q/xԒwo8gI5C\@jc:)m q$ژ)bT9bM*nG[$!߹QE7$K1Hqȉn^hSÆngѭ]ȧ|,i - uJ#'WD[y{btCs>p[(3!lڪ!Bؔ7"e4,Y܌PSMlMhqb]^-}$[SL{rO]Y&.1쟗j|hc֟]/o#͇25CfVFW=7ݥ]-'ZKև[CU_>ч@~wgCt57ݯkaw*ʡe·0ͦ- m!u[> dս8ux|:«N ;4^+yԑţ5Rx0:a$ |*~jK;f@*g]?b# "nɧ}3>1(Fslސm I~nD,ag;Q67+v6Z[(542V;Ԟ[-4{+}RGρ9`J[!DLjmntST/gg&%wNCn<5"=]d"=]^8c'/P?q}ЍV FTPh]gRf0]yX"lf(>ܒ$45R7XzZimQ!hHUi0Z y( ғKv#Hf)%95M0jL0a9%V6 h#@Ug0D#ID5? 0-(qə\Hxn.P'q|gE 9\AJQ%]svY<TSe>(?&RWkydS$FgQ(.(M"_90HųLbILр03?Fɐr[8$fVlEsSo4d0d Z`3 l0h@&OCm\Oψdf1~:" /.)=fpdOD'$i!3g 1DVA%wwdƏXEp 0k|q{*k|>ŕZ|5> ;YIR>>7: s/W˿:+j)n|ӶQlI2WݻcD\f`,bE5V(ǫ$c]Lh&Q&I*c{!e;b=-/V~^EU-M%g ˪iZ1z8n?>:)Yᄐ(B ;ފ84_v1bH#41ļ\M{}C 2>MQ͸Yݾ_;ha$'f{l t 2Q9tt(q#VҾV[;s5sטǬg FL`J:v²PxJ}(}&lNb4]!&$;DZ3 ,M"5"XhbChXMV Fqُ>zD(y}gtǏ:bבMN?%IS۰gu0Qqy\: KX 5Eض)I%Ŋypj@ݍh"4wY+_hט{cJרAAC\RPƪ`-sD! BUhYJ#8.36XDMn3'1gmnIpTԓ|K󙢘YěZ T$~Tq$3}(С28cjB8\&>}4'%=9"0} qAbIL*"a@5P((K-NWnȀg'Hs8ΊgfbALrٽf}ഹis76`E[L {])˫{cw/K;^9OW55LqحشhK0ȟ8Ž!6Afв)¬̀\ny!!K1-[%=<b cb8.#K2[^,11\Y$\Y 9޹rXt<cbx,q7V\(+9Db&n1:b!!7<?$cf.MǺh۔ ]bĺwavlc問,\R91T79W+ w7ђ]n<`gǝvB$g˛%͇VFW?񀾻essD{apwqiuk7h]Usk\xCyrMk.Leo*ER Cxkaw-s>i6m3>@6:H >b|s>d:\"?+C Ymڂ E^cUy*IJS$;ఞvv 6ms㣗|wm[{9FoB?rwQ="q#G8`?S~>MQ&4֓9JG~w[O.Fzko4$!RJSd˿5;kuQDMP9J]]sҜXP B]26gI] vԟUlӼӶԜzWLy)KucL/=$mc뻗j/M#͔F&fٍ81]ͭ_V[PKх]z◘r91:*U3Sb1=n7Dhe(TGʐ*mW9ڛR= #6j#ddmw˸2Q 0lle(Q3i^m}ثQM4FV}}UG& L[qNAJ+WIQO'f D*¸ jԢvv-U3HV uu+ь#&@kET{θSD /ҩEzN/t]"Q,$oTQ!(ڇOKF({}[VłV/W~eVRY0YR/e&=gu\\cΊv"8?^2UrSyB&WR"B^W \B(0rBZeKM{*I#b߭L1Fޕ\5K@&[\aLUi^WiJp ZΓ^XA)Dsym% )yC4Ь448Dڀ?}T3>t2FWLoZ!h҅w1X(M#%j% VmN6*}ų46HzY J)۟+MCVnb,s8B(jy`ʚTSƢIkz;nLvfF(j&kE};a{+%́m]( aj (cICk&<㵥0LNݡL+]pք TAc\ U h`io,8svWw_[IpNϫeZZU:X[)m0/tyS׼x xϣ]{+UC|?w_(<+*% aXfTVbVG%8ƉAYߛTAh^tҕ ʫV&2gN8uVP*sG_7YPP1A&Վtu'U=B"__8^ [l*FSUq(E.[mj # 妺uqaL*q-iHe8 HJ`Ojpru]k+Wdb˖dsg &#SihVf3?ڔ+dI-ɥ8b~b(H4LY)R9Ɩ0,^?^>L#W$Q n'WbVjV 9=X[4)xCέW%RTz 'u5a6 ΙFi)ꊤni8P`ȑ.˥`=f['/R8¤h5Iw[∜ՏŞ>U.'QDu>\W:Jlf~ԃD{OoY_\u)Bn(8֎TӣOlgUŞ|}6ϬΆ99cuu)v^VH*Ϭ4Bmo񸇆'Mn?P9c7E-ۦf܈دJG1DvOUOm.z~cZO8q37ʭ> 3%/Mxulh);w4qU] kJM}~nF *V냿}<0*ffL7y(AAFgChUZOI< `IfO}Ώj<%Ƣ#Z; Sؕ%񫶰i8/z?wi.#֛ؖ(Y9Xt‚<~%u.SF*"p$=ٴOJ"=Y\CPdNTocE`'>\ q ˴\\.=7y(=>.d5nrTGfV; H!=:!ڈ6D5' ^^_Ӯs1;Pg3n'!,&dؔ殑YŲ'釡 m@{$/Kw~ q)2C)"QMYOB9{Q^6ԍMIͅ~WCPM"'a|l>/sSCj\)sWȾ:=`>S*5{Y? wD=0_P\NQ)1Q,cSINv~"۳=D%G$sS2TvSE"F`9 hB5~rl㖦&؄Ф8SsH񻑇bSsS-↧ ԥU5A[bb%')U5 ũcvw}莊vc@.?Ľ[+X%EػLtܨoVǾ1G4D殯:|ؖz0>׉@8VjȽpJ<]&?5Ni V (F̀gy';?yQy%ҰygmbE"ϺJz+V!U2hcalй.QhCߒFk>"Ǖ&4>n4;Ƣ#ʎu:gO5.{`BRZ*6(; @֞Ɵ,F{fc86U^Fa47sNbF ܄sԍG7bZO"툲S+&'qV)e`:..@r'c+֕CtvD١F~B'qVE/y %"Fl՟R BK-$ ϝ\xl2p3'ld`]2n8~w7B/1uVB$覦&˱b: ^{?k}=>qdC"1DUmRl@?{֡?#8j]B1()n .d|Z]}Yw7hIy(UdD؄F)TS7:=~џ9O<-uC#}I .}0#:^4b: xg ʔeK+5? 
var/home/core/zuul-output/logs/kubelet.log
Jan 29 10:41:41 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 29 10:41:41 crc restorecon[4757]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 
10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc 
restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 
crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 
crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:41 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 
10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 10:41:42 crc 
restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 
10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 
10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc 
restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 10:41:42 crc restorecon[4757]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 29 10:41:43 crc kubenswrapper[4852]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 29 10:41:43 crc kubenswrapper[4852]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 29 10:41:43 crc kubenswrapper[4852]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 29 10:41:43 crc kubenswrapper[4852]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Jan 29 10:41:43 crc kubenswrapper[4852]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Jan 29 10:41:43 crc kubenswrapper[4852]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.220423 4852 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229687 4852 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229727 4852 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229732 4852 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229736 4852 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229740 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229745 4852 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229749 4852 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229752 4852 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229756 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229760 4852 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229765 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229769 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229774 4852 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229778 4852 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229783 4852 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229787 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229791 4852 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229795 4852 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229798 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229802 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 
10:41:43.229806 4852 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229809 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229813 4852 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229817 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229821 4852 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229824 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229828 4852 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229832 4852 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229835 4852 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229839 4852 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229843 4852 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229846 4852 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229850 4852 feature_gate.go:330] unrecognized feature gate: Example Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229853 4852 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229857 4852 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229862 4852 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229865 4852 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229869 4852 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229873 4852 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229876 4852 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229880 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229883 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229888 4852 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229895 4852 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229902 4852 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229906 4852 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229910 4852 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229914 4852 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229918 4852 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229922 4852 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229926 4852 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229929 4852 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229933 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229936 4852 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229945 4852 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229949 4852 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229956 4852 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229962 4852 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229971 4852 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229975 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229980 4852 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229986 4852 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229991 4852 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.229995 4852 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230000 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230004 4852 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230008 4852 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230012 4852 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230015 4852 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230018 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230022 4852 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230122 4852 flags.go:64] FLAG: --address="0.0.0.0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230133 4852 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230141 4852 flags.go:64] FLAG: --anonymous-auth="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230147 4852 flags.go:64] FLAG: --application-metrics-count-limit="100" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230154 4852 flags.go:64] FLAG: --authentication-token-webhook="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230160 4852 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230167 4852 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230173 4852 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230178 4852 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230183 4852 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230188 4852 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230194 4852 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230198 4852 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230202 4852 flags.go:64] FLAG: --cgroup-root="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230206 4852 flags.go:64] FLAG: --cgroups-per-qos="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230211 4852 flags.go:64] FLAG: --client-ca-file="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230215 4852 flags.go:64] FLAG: --cloud-config="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230219 4852 flags.go:64] FLAG: --cloud-provider="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230223 4852 flags.go:64] FLAG: --cluster-dns="[]" Jan 29 10:41:43 crc 
kubenswrapper[4852]: I0129 10:41:43.230229 4852 flags.go:64] FLAG: --cluster-domain="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230233 4852 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230238 4852 flags.go:64] FLAG: --config-dir="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230242 4852 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230246 4852 flags.go:64] FLAG: --container-log-max-files="5" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230253 4852 flags.go:64] FLAG: --container-log-max-size="10Mi" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230257 4852 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230262 4852 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230267 4852 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230271 4852 flags.go:64] FLAG: --contention-profiling="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230275 4852 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230279 4852 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230283 4852 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230288 4852 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230294 4852 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230298 4852 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230302 4852 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230307 4852 flags.go:64] FLAG: --enable-load-reader="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230311 4852 flags.go:64] FLAG: --enable-server="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230316 4852 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230321 4852 flags.go:64] FLAG: --event-burst="100" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230326 4852 flags.go:64] FLAG: --event-qps="50" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230330 4852 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230334 4852 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230338 4852 flags.go:64] FLAG: --eviction-hard="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230343 4852 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230348 4852 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230353 4852 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230357 4852 flags.go:64] FLAG: --eviction-soft="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230362 4852 flags.go:64] FLAG: 
--eviction-soft-grace-period="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230366 4852 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230370 4852 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230374 4852 flags.go:64] FLAG: --experimental-mounter-path="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230378 4852 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230382 4852 flags.go:64] FLAG: --fail-swap-on="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230386 4852 flags.go:64] FLAG: --feature-gates="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230396 4852 flags.go:64] FLAG: --file-check-frequency="20s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230400 4852 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230405 4852 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230409 4852 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230414 4852 flags.go:64] FLAG: --healthz-port="10248" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230418 4852 flags.go:64] FLAG: --help="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230422 4852 flags.go:64] FLAG: --hostname-override="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230427 4852 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230431 4852 flags.go:64] FLAG: --http-check-frequency="20s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230435 4852 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230439 4852 flags.go:64] FLAG: --image-credential-provider-config="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230444 4852 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230449 4852 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230455 4852 flags.go:64] FLAG: --image-service-endpoint="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230460 4852 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230465 4852 flags.go:64] FLAG: --kube-api-burst="100" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230470 4852 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230474 4852 flags.go:64] FLAG: --kube-api-qps="50" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230478 4852 flags.go:64] FLAG: --kube-reserved="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230483 4852 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230486 4852 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230491 4852 flags.go:64] FLAG: --kubelet-cgroups="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230494 4852 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230498 4852 flags.go:64] FLAG: --lock-file="" 
Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230502 4852 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230506 4852 flags.go:64] FLAG: --log-flush-frequency="5s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230511 4852 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230518 4852 flags.go:64] FLAG: --log-json-split-stream="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230522 4852 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230526 4852 flags.go:64] FLAG: --log-text-split-stream="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230531 4852 flags.go:64] FLAG: --logging-format="text" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230535 4852 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230539 4852 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230544 4852 flags.go:64] FLAG: --manifest-url="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230548 4852 flags.go:64] FLAG: --manifest-url-header="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230554 4852 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230558 4852 flags.go:64] FLAG: --max-open-files="1000000" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230563 4852 flags.go:64] FLAG: --max-pods="110" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230568 4852 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230572 4852 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230597 4852 flags.go:64] FLAG: --memory-manager-policy="None" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230602 4852 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230606 4852 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230610 4852 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230614 4852 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230628 4852 flags.go:64] FLAG: --node-status-max-images="50" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230633 4852 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230637 4852 flags.go:64] FLAG: --oom-score-adj="-999" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230641 4852 flags.go:64] FLAG: --pod-cidr="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230646 4852 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230652 4852 flags.go:64] FLAG: --pod-manifest-path="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230656 4852 flags.go:64] FLAG: --pod-max-pids="-1" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230660 4852 
flags.go:64] FLAG: --pods-per-core="0" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230665 4852 flags.go:64] FLAG: --port="10250" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230669 4852 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230674 4852 flags.go:64] FLAG: --provider-id="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230678 4852 flags.go:64] FLAG: --qos-reserved="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230682 4852 flags.go:64] FLAG: --read-only-port="10255" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230687 4852 flags.go:64] FLAG: --register-node="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230692 4852 flags.go:64] FLAG: --register-schedulable="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230697 4852 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230707 4852 flags.go:64] FLAG: --registry-burst="10" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230712 4852 flags.go:64] FLAG: --registry-qps="5" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230717 4852 flags.go:64] FLAG: --reserved-cpus="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230725 4852 flags.go:64] FLAG: --reserved-memory="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230732 4852 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230737 4852 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230743 4852 flags.go:64] FLAG: --rotate-certificates="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230747 4852 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230751 4852 flags.go:64] FLAG: --runonce="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230755 4852 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230759 4852 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230764 4852 flags.go:64] FLAG: --seccomp-default="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230768 4852 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230772 4852 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230776 4852 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230780 4852 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230784 4852 flags.go:64] FLAG: --storage-driver-password="root" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230789 4852 flags.go:64] FLAG: --storage-driver-secure="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230793 4852 flags.go:64] FLAG: --storage-driver-table="stats" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230797 4852 flags.go:64] FLAG: --storage-driver-user="root" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230801 4852 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230805 4852 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 
29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230810 4852 flags.go:64] FLAG: --system-cgroups="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230814 4852 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230821 4852 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230824 4852 flags.go:64] FLAG: --tls-cert-file="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230829 4852 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230833 4852 flags.go:64] FLAG: --tls-min-version="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230837 4852 flags.go:64] FLAG: --tls-private-key-file="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230841 4852 flags.go:64] FLAG: --topology-manager-policy="none" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230845 4852 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230849 4852 flags.go:64] FLAG: --topology-manager-scope="container" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230853 4852 flags.go:64] FLAG: --v="2" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230860 4852 flags.go:64] FLAG: --version="false" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230866 4852 flags.go:64] FLAG: --vmodule="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230871 4852 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.230876 4852 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230979 4852 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230984 4852 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230988 4852 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230992 4852 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.230996 4852 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231000 4852 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231003 4852 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231007 4852 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231010 4852 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231014 4852 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231017 4852 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231021 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231024 4852 feature_gate.go:330] unrecognized feature gate: 
MinimumKubeletVersion Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231028 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231034 4852 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231039 4852 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231043 4852 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231047 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231051 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231055 4852 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231059 4852 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231063 4852 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231067 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231070 4852 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231074 4852 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231077 4852 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231081 4852 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231085 4852 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231090 4852 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
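The flags.go:64 entries above dump every kubelet command-line flag together with its effective value (for example --config="/etc/kubernetes/kubelet.conf", --node-ip="192.168.126.11", --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"). A minimal parsing sketch follows, assuming the journal output has been saved to a local file; the kubelet.log path and the script name are illustrative and not part of this capture:

    # flag_dump.py - illustrative helper, not part of the captured log:
    # collect the kubelet 'FLAG: --name="value"' entries emitted at flags.go:64.
    import re
    import sys

    FLAG_RE = re.compile(r'FLAG: --([\w-]+)="(.*?)"')

    def collect_flags(text):
        """Return a dict of flag name -> value exactly as printed in the log."""
        return {m.group(1): m.group(2) for m in FLAG_RE.finditer(text)}

    if __name__ == "__main__":
        # The default path is an assumption; point it at a saved copy of this journal.
        path = sys.argv[1] if len(sys.argv) > 1 else "kubelet.log"
        with open(path) as fh:
            for name, value in sorted(collect_flags(fh.read()).items()):
                print(f"--{name} = {value!r}")

Run against this capture it would report, among others, --cgroup-driver, --max-pods and --system-reserved with the values logged above; entries whose line wrapped mid-flag may be missed.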
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231095 4852 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231099 4852 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231103 4852 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231106 4852 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231110 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231113 4852 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231117 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231120 4852 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231124 4852 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231128 4852 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231131 4852 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231135 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231138 4852 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231142 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231145 4852 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231150 4852 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231155 4852 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231165 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231169 4852 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231173 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231177 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231181 4852 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231185 4852 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231188 4852 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231192 4852 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231195 4852 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231198 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231202 4852 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231205 4852 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231209 4852 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231212 4852 feature_gate.go:330] unrecognized feature gate: Example Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231216 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231219 4852 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231222 4852 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231226 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231229 4852 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231233 4852 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231236 4852 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231240 4852 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231244 4852 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231249 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.231253 4852 feature_gate.go:330] unrecognized 
feature gate: SigstoreImageVerification Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.231269 4852 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.240764 4852 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.240809 4852 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240885 4852 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240898 4852 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240903 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240908 4852 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240912 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240916 4852 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240919 4852 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240923 4852 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240927 4852 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240931 4852 feature_gate.go:330] unrecognized feature gate: Example Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240935 4852 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240940 4852 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240946 4852 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240950 4852 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240955 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240959 4852 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240964 4852 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240969 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240973 4852 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240978 4852 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240982 4852 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240986 4852 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240991 4852 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.240995 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241000 4852 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241005 4852 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241009 4852 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241013 4852 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241017 4852 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241022 4852 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241026 4852 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241030 4852 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241034 4852 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241038 4852 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241043 4852 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241048 4852 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241052 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241057 4852 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241061 4852 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241065 4852 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241068 4852 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241072 4852 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241076 4852 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241081 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241084 4852 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241088 4852 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241092 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241096 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241100 4852 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241103 4852 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241107 4852 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241110 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241114 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241118 4852 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241125 4852 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241129 4852 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241132 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241135 4852 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241139 4852 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241143 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241147 4852 feature_gate.go:330] unrecognized 
feature gate: NewOLM Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241151 4852 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241156 4852 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241160 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241164 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241168 4852 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241172 4852 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241176 4852 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241179 4852 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241183 4852 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241186 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.241193 4852 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241304 4852 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241310 4852 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241313 4852 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241318 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241321 4852 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241325 4852 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241328 4852 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241332 4852 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241336 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241339 4852 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241343 4852 feature_gate.go:330] unrecognized 
feature gate: PersistentIPsForVirtualization Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241346 4852 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241350 4852 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241355 4852 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241360 4852 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241365 4852 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241368 4852 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241372 4852 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241414 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241420 4852 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241424 4852 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241428 4852 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241431 4852 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241435 4852 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241440 4852 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241444 4852 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241448 4852 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241452 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241456 4852 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241460 4852 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241464 4852 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241468 4852 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241471 4852 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241475 4852 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241478 4852 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241482 4852 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241486 4852 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241489 4852 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241493 4852 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241496 4852 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241501 4852 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241505 4852 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241509 4852 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241514 4852 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241518 4852 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241522 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241527 4852 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241533 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241538 4852 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241544 4852 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241548 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241553 4852 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241557 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241561 4852 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241564 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241568 4852 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241571 4852 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241596 4852 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241601 4852 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241605 4852 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241608 4852 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241612 4852 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241616 4852 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241634 4852 feature_gate.go:330] unrecognized feature gate: Example Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241638 4852 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241643 4852 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241647 4852 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241650 4852 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241655 4852 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241658 4852 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.241662 4852 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.241670 4852 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.242792 4852 server.go:940] "Client rotation is on, will bootstrap in background" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.248395 4852 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.248504 4852 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
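The long runs of "unrecognized feature gate" warnings above appear to be the upstream kubelet reacting to OpenShift-level gate names (PinnedImages, GatewayAPI, AutomatedEtcdBackup, and so on) that it does not register; only the Kubernetes gates survive into the resolved map that feature_gate.go:386 prints, and that map is identical in each of its repeats. A short summarizing sketch, again assuming the journal has been saved locally (path and script name are illustrative):

    # feature_gates.py - illustrative helper: tally the distinct unrecognized gate
    # names and show the resolved gate map from a saved kubelet journal.
    import re
    import sys

    UNRECOGNIZED_RE = re.compile(r"unrecognized feature gate: (\S+)")
    RESOLVED_RE = re.compile(r"feature gates: \{map\[(.*?)\]\}")

    def summarize(text):
        unknown = sorted(set(UNRECOGNIZED_RE.findall(text)))
        resolved = {}
        for blob in RESOLVED_RE.findall(text):
            # Every dump of the resolved map in this log carries the same entries,
            # each formatted as "Name:true" or "Name:false".
            resolved = dict(item.split(":", 1) for item in blob.split())
        return unknown, resolved

    if __name__ == "__main__":
        path = sys.argv[1] if len(sys.argv) > 1 else "kubelet.log"  # path is an assumption
        with open(path) as fh:
            unknown, resolved = summarize(fh.read())
        print(f"{len(unknown)} distinct unrecognized gates: {', '.join(unknown)}")
        for name, value in sorted(resolved.items()):
            print(f"{name}={value}")

For this capture the resolved map would come out with CloudDualStackNodeIPs, DisableKubeletCloudCredentialProviders, KMSv1 and ValidatingAdmissionPolicy true and the remaining listed gates false, matching the feature_gate.go:386 lines above.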
Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.250673 4852 server.go:997] "Starting client certificate rotation" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.250704 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.251754 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-15 23:15:48.010490186 +0000 UTC Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.251869 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.286664 4852 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.290371 4852 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.290475 4852 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.306545 4852 log.go:25] "Validated CRI v1 runtime API" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.351474 4852 log.go:25] "Validated CRI v1 image API" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.353495 4852 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.364652 4852 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-29-10-36-41-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.364721 4852 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}] Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.384654 4852 manager.go:217] Machine: {Timestamp:2026-01-29 10:41:43.382688686 +0000 UTC m=+0.600019860 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:6667fab7-c571-4f1d-8f14-ac3c8ed4cf48 BootID:6d167e52-c8b4-491a-b64e-fc81a677c102 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 
Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:0a:f7:b5 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:0a:f7:b5 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:a8:a1:93 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:f6:bb:27 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:bf:7f:2a Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:e8:61:27 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:69:f4:9c Speed:-1 Mtu:1496} {Name:eth10 MacAddress:42:0d:7a:9a:e3:25 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:d2:6e:0b:0a:8f:e3 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified 
Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.384949 4852 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.385099 4852 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.387166 4852 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.387432 4852 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.387471 4852 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.387767 4852 topology_manager.go:138] "Creating topology manager with none policy" Jan 29 
10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.387780 4852 container_manager_linux.go:303] "Creating device plugin manager" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.388298 4852 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.388322 4852 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.388541 4852 state_mem.go:36] "Initialized new in-memory state store" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.388690 4852 server.go:1245] "Using root directory" path="/var/lib/kubelet" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.392259 4852 kubelet.go:418] "Attempting to sync node with API server" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.392286 4852 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.392316 4852 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.392331 4852 kubelet.go:324] "Adding apiserver pod source" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.392344 4852 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.396023 4852 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.397016 4852 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
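
The certificate_store message above refers to the kubelet's combined serving certificate/key file at /var/lib/kubelet/pki/kubelet-server-current.pem, whose validity window is what the later "Certificate expiration is ... rotation deadline is ..." lines report. As a rough illustration only (this is not the kubelet's own implementation, just a minimal Go sketch assuming the file contains both the certificate and key PEM blocks, as this log suggests), such a pair can be loaded and its expiry inspected with the standard library:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"log"
)

func main() {
	// Path taken from the kubelet log line above; the same file is assumed to
	// hold both the certificate and the private key, so it is passed twice.
	const pemPath = "/var/lib/kubelet/pki/kubelet-server-current.pem"

	pair, err := tls.LoadX509KeyPair(pemPath, pemPath)
	if err != nil {
		log.Fatalf("loading serving cert/key pair: %v", err)
	}

	// Parse the leaf certificate to read its validity window, i.e. the value
	// the kubelet's rotation messages print as the certificate expiration.
	leaf, err := x509.ParseCertificate(pair.Certificate[0])
	if err != nil {
		log.Fatalf("parsing leaf certificate: %v", err)
	}
	fmt.Printf("serving certificate valid until %s\n", leaf.NotAfter)
}
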
Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.397986 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.398059 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.398063 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.398162 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.399614 4852 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401444 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401483 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401496 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401509 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401529 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401543 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401556 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401598 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401615 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401630 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401648 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401661 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.401690 4852 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 
10:41:43.402344 4852 server.go:1280] "Started kubelet" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.403013 4852 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.403754 4852 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.403572 4852 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Jan 29 10:41:43 crc systemd[1]: Started Kubernetes Kubelet. Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.404728 4852 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.408333 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.408404 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 06:52:36.427766198 +0000 UTC Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.408774 4852 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.409192 4852 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.409210 4852 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.409408 4852 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.410514 4852 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.411093 4852 factory.go:55] Registering systemd factory Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.411115 4852 factory.go:221] Registration of the systemd container factory successfully Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.411494 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.411627 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.413458 4852 factory.go:153] Registering CRI-O factory Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.413484 4852 factory.go:221] Registration of the crio container factory successfully Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.413564 4852 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 29 
10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.413605 4852 factory.go:103] Registering Raw factory Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.413621 4852 manager.go:1196] Started watching for new ooms in manager Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.414825 4852 server.go:460] "Adding debug handlers to kubelet server" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.416296 4852 manager.go:319] Starting recovery of all containers Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.414388 4852 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f2d96983be8a9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 10:41:43.402301609 +0000 UTC m=+0.619632773,LastTimestamp:2026-01-29 10:41:43.402301609 +0000 UTC m=+0.619632773,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.419271 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="200ms" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428155 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428217 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428237 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428255 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428272 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428289 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428306 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428324 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428344 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428360 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428378 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428394 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428412 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428431 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428449 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428467 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428519 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428539 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428555 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428572 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428680 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428701 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428717 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428742 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428759 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428779 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428800 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428819 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428836 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428852 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428867 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428885 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428955 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428978 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.428994 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429012 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429027 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429044 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429060 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429076 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429092 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429108 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429125 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429143 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429160 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429176 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429192 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429208 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429227 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429280 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429344 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429362 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429412 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429433 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429452 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429470 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429488 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429505 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429521 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429536 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429562 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429600 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429620 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429635 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429654 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429672 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429688 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429703 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429719 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429743 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429760 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429775 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429791 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429806 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429824 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429841 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429859 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429876 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429894 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429910 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429930 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429948 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429965 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.429982 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430000 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430017 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430036 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430052 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430068 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430085 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430101 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430117 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430134 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430150 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430167 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430185 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430202 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430217 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430234 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430252 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430268 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430287 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430303 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430319 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430343 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430363 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430382 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430401 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430422 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430442 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430458 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430474 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430494 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430515 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430534 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430551 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430569 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430608 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430629 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430647 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430666 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430683 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430700 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430717 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430733 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430749 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430766 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430783 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430802 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430819 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430836 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430908 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430927 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430943 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430960 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.430985 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431001 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431018 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431033 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431053 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431069 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431088 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431104 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431120 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431138 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431155 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431171 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431189 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431206 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431222 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431238 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431255 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431273 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431288 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431308 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431325 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431342 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431359 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431377 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431396 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431416 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431436 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431455 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431482 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431500 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431516 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431533 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431549 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431563 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431604 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431622 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431639 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431654 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431671 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431688 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.431704 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.436657 4852 manager.go:324] Recovery completed Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438278 4852 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438331 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438362 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438381 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438393 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" 
Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438408 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438419 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438431 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438446 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438458 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438471 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438482 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438493 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438507 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438517 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438530 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 
10:41:43.438630 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438672 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438685 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438697 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438712 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438740 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438755 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438766 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438776 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438789 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438801 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438815 4852 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438828 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438837 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438851 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438861 4852 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438870 4852 reconstruct.go:97] "Volume reconstruction finished" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.438877 4852 reconciler.go:26] "Reconciler: start to sync state" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.450755 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.452323 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.452590 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.452604 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.453353 4852 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.453372 4852 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.453387 4852 state_mem.go:36] "Initialized new in-memory state store" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.460400 4852 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.462113 4852 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.462150 4852 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.462172 4852 kubelet.go:2335] "Starting kubelet main sync loop" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.462209 4852 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.463642 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.463727 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.475670 4852 policy_none.go:49] "None policy: Start" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.476416 4852 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.476439 4852 state_mem.go:35] "Initializing new in-memory state store" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.510647 4852 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.529895 4852 manager.go:334] "Starting Device Plugin manager" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.531001 4852 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.531021 4852 server.go:79] "Starting device plugin registration server" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.531636 4852 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.531656 4852 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.531998 4852 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.532081 4852 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.532092 4852 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.540947 4852 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.562561 4852 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 10:41:43 crc kubenswrapper[4852]: 
I0129 10:41:43.562791 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.567661 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.567711 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.567724 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.567920 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.568989 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569053 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569073 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569273 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569320 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569335 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569410 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.569458 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570653 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570686 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570700 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570815 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570870 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570890 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570897 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570919 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.570931 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.571084 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.571223 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.571261 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572242 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572249 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572282 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572295 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572268 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572360 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572433 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572461 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.572486 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573149 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573205 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573272 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573291 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573310 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573320 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573642 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.573723 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.574655 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.574735 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.574747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.613344 4852 helpers.go:245] readString: Failed to read "/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/cpuset.cpus.effective": read /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/cpuset.cpus.effective: no such device Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.620346 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="400ms" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.632409 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.633884 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.633938 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.633957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.633990 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.634650 4852 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642317 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642359 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642384 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642407 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642428 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642488 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642526 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642551 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642576 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642630 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642673 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642709 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642732 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642762 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.642783 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743796 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743853 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743881 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743901 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743920 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743940 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743959 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743977 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.743999 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744027 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744017 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744038 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744092 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744045 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744131 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744153 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: 
I0129 10:41:43.744157 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744152 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744173 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744177 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744204 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744219 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744216 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744274 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744254 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744257 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744306 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744242 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744246 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.744460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.835349 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.836540 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.836568 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.836596 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.836624 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:41:43 crc kubenswrapper[4852]: E0129 10:41:43.837054 4852 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.902366 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.910646 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.936519 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.946749 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: I0129 10:41:43.957313 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.961261 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-6eddf6ed076817f6edd395aa57e2660668d2d765ba736ec68b8cdf3522539440 WatchSource:0}: Error finding container 6eddf6ed076817f6edd395aa57e2660668d2d765ba736ec68b8cdf3522539440: Status 404 returned error can't find the container with id 6eddf6ed076817f6edd395aa57e2660668d2d765ba736ec68b8cdf3522539440 Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.963389 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-71e1510407943f1989c90e5e4ac1f5aabc827557719ebbed28f55fbaf9ccdff7 WatchSource:0}: Error finding container 71e1510407943f1989c90e5e4ac1f5aabc827557719ebbed28f55fbaf9ccdff7: Status 404 returned error can't find the container with id 71e1510407943f1989c90e5e4ac1f5aabc827557719ebbed28f55fbaf9ccdff7 Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.982113 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-0282d24b54ba3fb4a98d4d7e83efcb1368bfd3f097021d0a03c43dd1476815f1 WatchSource:0}: Error finding container 0282d24b54ba3fb4a98d4d7e83efcb1368bfd3f097021d0a03c43dd1476815f1: Status 404 returned error can't find the container with id 0282d24b54ba3fb4a98d4d7e83efcb1368bfd3f097021d0a03c43dd1476815f1 Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.984203 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-dc11090675244c373d7464972db06e493a94f7eb5a3d4064a7996b1ddcb350be WatchSource:0}: Error finding container dc11090675244c373d7464972db06e493a94f7eb5a3d4064a7996b1ddcb350be: Status 404 returned error can't find the container with id dc11090675244c373d7464972db06e493a94f7eb5a3d4064a7996b1ddcb350be Jan 29 10:41:43 crc kubenswrapper[4852]: W0129 10:41:43.993204 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-2db2848b76ac9fe6fd625f23aa208fb58afa0eb7789245ef30d1e732b7e57da6 WatchSource:0}: Error finding container 2db2848b76ac9fe6fd625f23aa208fb58afa0eb7789245ef30d1e732b7e57da6: Status 404 returned error can't find the container with id 2db2848b76ac9fe6fd625f23aa208fb58afa0eb7789245ef30d1e732b7e57da6 Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.021464 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="800ms" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.237665 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.239218 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.239266 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.239275 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.239300 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.240352 4852 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc" Jan 29 10:41:44 crc kubenswrapper[4852]: W0129 10:41:44.261277 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.261356 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:44 crc kubenswrapper[4852]: W0129 10:41:44.329614 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.329773 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:44 crc kubenswrapper[4852]: W0129 10:41:44.350676 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.350782 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.405090 4852 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.409248 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 06:28:24.777017633 +0000 UTC Jan 29 10:41:44 crc kubenswrapper[4852]: W0129 10:41:44.415279 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get 
"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.415348 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.466058 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6eddf6ed076817f6edd395aa57e2660668d2d765ba736ec68b8cdf3522539440"} Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.467603 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2db2848b76ac9fe6fd625f23aa208fb58afa0eb7789245ef30d1e732b7e57da6"} Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.470670 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dc11090675244c373d7464972db06e493a94f7eb5a3d4064a7996b1ddcb350be"} Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.471984 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"0282d24b54ba3fb4a98d4d7e83efcb1368bfd3f097021d0a03c43dd1476815f1"} Jan 29 10:41:44 crc kubenswrapper[4852]: I0129 10:41:44.473035 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"71e1510407943f1989c90e5e4ac1f5aabc827557719ebbed28f55fbaf9ccdff7"} Jan 29 10:41:44 crc kubenswrapper[4852]: E0129 10:41:44.822520 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="1.6s" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.040760 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.043389 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.043439 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.043451 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.043482 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:41:45 crc kubenswrapper[4852]: E0129 10:41:45.044051 4852 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection 
refused" node="crc" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.404820 4852 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.410010 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 20:26:55.2131812 +0000 UTC Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.433708 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 10:41:45 crc kubenswrapper[4852]: E0129 10:41:45.434717 4852 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.477621 4852 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="8c2c6f545e9e367d20dc7633ffe4d770cfb5287d1fdf35058816d6f69062d7fa" exitCode=0 Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.477711 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.477704 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"8c2c6f545e9e367d20dc7633ffe4d770cfb5287d1fdf35058816d6f69062d7fa"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.478569 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.478618 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.478630 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.480012 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726" exitCode=0 Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.480072 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.480191 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.481234 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.481279 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.481295 4852 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.482709 4852 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2" exitCode=0 Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.482759 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.482790 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483013 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483854 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483888 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483905 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483890 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.483964 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.485917 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.485964 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.485983 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.488176 4852 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d" exitCode=0 Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.488205 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d"} Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.488284 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.489216 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.489253 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:45 crc kubenswrapper[4852]: I0129 10:41:45.489268 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.404672 4852 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.410191 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 18:01:38.61853097 +0000 UTC Jan 29 10:41:46 crc kubenswrapper[4852]: E0129 10:41:46.423908 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="3.2s" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.494099 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.494139 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.495141 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.495186 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.495200 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.498489 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.498534 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.498551 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.498747 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.502352 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.502389 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.502403 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.502797 4852 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cd98b4d1ffd6f3deb2392c2541e5f789283166209ca43224ec050372f79e71e0" exitCode=0 Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.502898 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cd98b4d1ffd6f3deb2392c2541e5f789283166209ca43224ec050372f79e71e0"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.503046 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.504260 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.504289 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.504299 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.508395 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.508449 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.508463 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.508473 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.510360 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333"} Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.510462 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.511362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.511426 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.511442 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.644614 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.646451 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.646514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.646532 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:46 crc kubenswrapper[4852]: I0129 10:41:46.646629 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:41:46 crc kubenswrapper[4852]: E0129 10:41:46.647270 4852 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc" Jan 29 10:41:46 crc kubenswrapper[4852]: W0129 10:41:46.879489 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:46 crc kubenswrapper[4852]: E0129 10:41:46.879622 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:47 crc kubenswrapper[4852]: W0129 10:41:47.068478 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:47 crc kubenswrapper[4852]: E0129 10:41:47.068559 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:47 crc kubenswrapper[4852]: W0129 10:41:47.083313 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get 
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:47 crc kubenswrapper[4852]: E0129 10:41:47.083444 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:47 crc kubenswrapper[4852]: W0129 10:41:47.184202 4852 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:47 crc kubenswrapper[4852]: E0129 10:41:47.184345 4852 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.405081 4852 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.410377 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 13:20:18.619544402 +0000 UTC Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.527777 4852 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="172a5cacc2a164434527fb6826e2d424a279e4192c9df514179003a0fd7db1be" exitCode=0 Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.527869 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"172a5cacc2a164434527fb6826e2d424a279e4192c9df514179003a0fd7db1be"} Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.527904 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.529002 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.529043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.529055 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.532712 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.532757 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"55f723d10d30c612432ad7ed9cdd31079264832fd2fc25f9ccecbef8a0c13077"} Jan 29 10:41:47 crc 
kubenswrapper[4852]: I0129 10:41:47.532776 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.532869 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.532947 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.533815 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534126 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534155 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534359 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534385 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534396 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.534988 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.535029 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.535050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.535306 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.535382 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:47 crc kubenswrapper[4852]: I0129 10:41:47.535400 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.005061 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.410478 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 05:42:45.234454029 +0000 UTC Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.539073 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fbdebc5c401ca83413a02f142f849b56184aa03583efa1ee707e484e872c1a69"} Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.539150 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"41cc0f3c20e6e3238f7b2afa0ee16d23a5391238e63842a12e805c8bfc523322"} Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.539181 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a73c288b8e1fc599e0550e3c16f1029aae50f00071b6bb415fbc0bfa23df8137"} Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.539206 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"cd35889adcdb4d420b097b551045c9e7be1e83c26468d051b0c65606855c79f0"} Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.539161 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.540549 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.540619 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:48 crc kubenswrapper[4852]: I0129 10:41:48.540633 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.411089 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 09:04:36.243448537 +0000 UTC Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.548129 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"27b8a4a075accd549db818f5a98fb089d16950dac2305da5f859a1bc96c7e10a"} Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.548187 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.548267 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.549327 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.549387 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.549398 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.549574 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.549653 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.549675 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.550788 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 10:41:49 crc 
kubenswrapper[4852]: I0129 10:41:49.737994 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.848081 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.849988 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.850046 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.850067 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:49 crc kubenswrapper[4852]: I0129 10:41:49.850100 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.411374 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 06:17:27.350239674 +0000 UTC Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.413718 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.532874 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.533322 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.535026 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.535098 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.535117 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.550755 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.551523 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.552194 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.552251 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.552269 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.553273 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.553515 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:50 crc kubenswrapper[4852]: I0129 10:41:50.554030 4852 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.377604 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.411522 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 16:37:52.71169596 +0000 UTC Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.554193 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.554195 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.555756 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.555974 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.556132 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.555918 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.556298 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.556323 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.902626 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.902811 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.903933 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.903969 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:51 crc kubenswrapper[4852]: I0129 10:41:51.903980 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:52 crc kubenswrapper[4852]: I0129 10:41:52.412275 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 16:27:17.214095502 +0000 UTC Jan 29 10:41:52 crc kubenswrapper[4852]: I0129 10:41:52.996309 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:52 crc kubenswrapper[4852]: I0129 10:41:52.996476 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:52 crc kubenswrapper[4852]: I0129 10:41:52.997608 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:52 crc 
kubenswrapper[4852]: I0129 10:41:52.997645 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:52 crc kubenswrapper[4852]: I0129 10:41:52.997654 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:53 crc kubenswrapper[4852]: I0129 10:41:53.412758 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 19:30:13.566522839 +0000 UTC Jan 29 10:41:53 crc kubenswrapper[4852]: E0129 10:41:53.541211 4852 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.329077 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.329262 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.330814 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.330883 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.330898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.334540 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.413388 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 10:57:04.375518094 +0000 UTC Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.562947 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.564705 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.564764 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.564787 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:54 crc kubenswrapper[4852]: I0129 10:41:54.569760 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.414208 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 18:10:47.182755221 +0000 UTC Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.565988 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.567437 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.567530 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.567546 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.900154 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.996741 4852 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 10:41:55 crc kubenswrapper[4852]: I0129 10:41:55.996825 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 10:41:56 crc kubenswrapper[4852]: I0129 10:41:56.415367 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 17:51:50.766404437 +0000 UTC Jan 29 10:41:56 crc kubenswrapper[4852]: I0129 10:41:56.568625 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:56 crc kubenswrapper[4852]: I0129 10:41:56.569535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:56 crc kubenswrapper[4852]: I0129 10:41:56.569576 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:56 crc kubenswrapper[4852]: I0129 10:41:56.569612 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:57 crc kubenswrapper[4852]: I0129 10:41:57.416080 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 12:28:03.92695003 +0000 UTC Jan 29 10:41:57 crc kubenswrapper[4852]: I0129 10:41:57.669312 4852 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38448->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 29 10:41:57 crc kubenswrapper[4852]: I0129 10:41:57.669381 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38448->192.168.126.11:17697: read: connection reset by peer" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.006405 4852 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints 
namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.006473 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.053596 4852 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.053669 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.061514 4852 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.061882 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.416943 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 06:05:14.587988776 +0000 UTC Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.574344 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.575965 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="55f723d10d30c612432ad7ed9cdd31079264832fd2fc25f9ccecbef8a0c13077" exitCode=255 Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.576005 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"55f723d10d30c612432ad7ed9cdd31079264832fd2fc25f9ccecbef8a0c13077"} Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.576173 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.577212 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:58 crc 
kubenswrapper[4852]: I0129 10:41:58.577244 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.577256 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:58 crc kubenswrapper[4852]: I0129 10:41:58.577822 4852 scope.go:117] "RemoveContainer" containerID="55f723d10d30c612432ad7ed9cdd31079264832fd2fc25f9ccecbef8a0c13077" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.351115 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.351312 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.352409 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.352437 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.352448 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.375039 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.418033 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 17:40:17.155656228 +0000 UTC Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.580448 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.583096 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e"} Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.583195 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.583343 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.584638 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.584689 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.584700 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.584788 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.584816 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:41:59 crc kubenswrapper[4852]: 
I0129 10:41:59.584830 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:41:59 crc kubenswrapper[4852]: I0129 10:41:59.601173 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 29 10:42:00 crc kubenswrapper[4852]: I0129 10:42:00.418618 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 04:33:31.77084736 +0000 UTC Jan 29 10:42:00 crc kubenswrapper[4852]: I0129 10:42:00.586130 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:42:00 crc kubenswrapper[4852]: I0129 10:42:00.587418 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:00 crc kubenswrapper[4852]: I0129 10:42:00.587464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:00 crc kubenswrapper[4852]: I0129 10:42:00.587479 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.384897 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.385074 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.385193 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.386208 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.386249 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.386264 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.389496 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.419453 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 06:19:52.788200646 +0000 UTC Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.590653 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.591788 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.591828 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:01 crc kubenswrapper[4852]: I0129 10:42:01.591838 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:02 crc kubenswrapper[4852]: I0129 10:42:02.420079 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 
05:53:03 +0000 UTC, rotation deadline is 2025-12-04 05:40:40.924347276 +0000 UTC Jan 29 10:42:02 crc kubenswrapper[4852]: I0129 10:42:02.592295 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:42:02 crc kubenswrapper[4852]: I0129 10:42:02.593235 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:02 crc kubenswrapper[4852]: I0129 10:42:02.593271 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:02 crc kubenswrapper[4852]: I0129 10:42:02.593282 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.046606 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.049485 4852 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.049546 4852 trace.go:236] Trace[1325816516]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 10:41:51.419) (total time: 11630ms): Jan 29 10:42:03 crc kubenswrapper[4852]: Trace[1325816516]: ---"Objects listed" error: 11630ms (10:42:03.049) Jan 29 10:42:03 crc kubenswrapper[4852]: Trace[1325816516]: [11.63037058s] [11.63037058s] END Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.049563 4852 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.060697 4852 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.061348 4852 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.061444 4852 trace.go:236] Trace[1167884798]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 10:41:52.616) (total time: 10444ms): Jan 29 10:42:03 crc kubenswrapper[4852]: Trace[1167884798]: ---"Objects listed" error: 10444ms (10:42:03.061) Jan 29 10:42:03 crc kubenswrapper[4852]: Trace[1167884798]: [10.444735691s] [10.444735691s] END Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.061471 4852 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.061864 4852 trace.go:236] Trace[555226997]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 10:41:51.282) (total time: 11778ms): Jan 29 10:42:03 crc kubenswrapper[4852]: Trace[555226997]: ---"Objects listed" error: 11778ms (10:42:03.061) Jan 29 10:42:03 crc kubenswrapper[4852]: Trace[555226997]: [11.778838426s] [11.778838426s] END Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.061906 4852 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.072361 4852 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from 
k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.115473 4852 csr.go:261] certificate signing request csr-hnffb is approved, waiting to be issued Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.157924 4852 csr.go:257] certificate signing request csr-hnffb is issued Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.249796 4852 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.249924 4852 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Node ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.249956 4852 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Service ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.249967 4852 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.CSIDriver ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.249935 4852 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-etcd/events\": read tcp 38.102.83.23:44252->38.102.83.23:6443: use of closed network connection" event="&Event{ObjectMeta:{etcd-crc.188f2d96bb9c1061 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:2139d3e2895fc6797b9c76a1b4c9886d,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 10:41:43.995805793 +0000 UTC m=+1.213136927,LastTimestamp:2026-01-29 10:41:43.995805793 +0000 UTC m=+1.213136927,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.249979 4852 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.RuntimeClass ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.405588 4852 apiserver.go:52] "Watching apiserver" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.420870 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 21:34:55.642398071 +0000 UTC Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.437927 4852 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.438721 4852 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-6grc8","openshift-image-registry/node-ca-r27t7","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.440351 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.440420 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.440937 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.441197 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.441322 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.441342 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.441350 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.441450 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.441610 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.441614 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.441714 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.443786 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.443789 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.444747 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.445691 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.445735 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.445762 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.445912 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.445954 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.445974 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.446119 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.446698 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.446742 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.446808 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.446859 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.448303 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.448690 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.458498 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.474945 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.488409 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.499091 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.506695 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.510781 4852 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.517712 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.534934 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.544757 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.553046 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.562402 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563534 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563572 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563609 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563628 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563668 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563686 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod 
\"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563715 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563734 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563754 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563777 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563795 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563816 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563838 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563859 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563881 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563923 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563947 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563968 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.563988 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564007 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564026 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564044 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564064 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564086 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564105 4852 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564128 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564169 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564191 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564209 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564245 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564265 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564286 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564305 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564327 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564346 4852 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564384 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564401 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564421 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564438 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564455 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564482 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564500 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564515 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564532 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564548 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564565 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564598 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564618 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564646 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564664 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564680 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564699 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564716 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564733 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " 
Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564751 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564770 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564803 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564822 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564839 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564859 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564931 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564961 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.564981 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565001 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: 
\"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565020 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565038 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565056 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565076 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565093 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565111 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565130 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565152 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565170 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565189 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" 
(UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565207 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565224 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565243 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565262 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565280 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565300 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565319 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565352 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565370 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565595 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565613 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565631 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565649 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565667 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565687 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565706 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565725 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565744 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565763 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565783 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565801 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565820 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565838 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565857 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565876 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565894 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565912 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565931 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565950 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565970 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.565991 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566011 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566030 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566050 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566069 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566090 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566111 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566132 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566154 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566173 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566196 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566215 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566233 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566251 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566270 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566287 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566326 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566347 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566366 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566389 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566409 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566429 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566451 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566445 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566472 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566562 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566613 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566650 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.566689 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.567071 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.567323 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.567886 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568194 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568252 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568270 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568405 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568434 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568614 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568623 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568794 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568801 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568908 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.568949 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569078 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569219 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569449 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569590 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569695 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569751 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569803 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.569900 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570006 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570040 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570181 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570307 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570726 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570828 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.570930 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.571329 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.571428 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.571566 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.571739 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.571855 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.571957 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572052 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: 
\"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572151 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572262 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.572283 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:42:04.072261343 +0000 UTC m=+21.289592477 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572458 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572561 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572708 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572785 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572855 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572923 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.572996 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573071 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573154 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573246 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573346 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573445 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573544 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573662 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573774 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573888 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.573998 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574103 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574216 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574333 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574433 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574534 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574688 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574800 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.574903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 10:42:03 
crc kubenswrapper[4852]: I0129 10:42:03.575012 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575109 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575200 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575293 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575391 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575489 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575608 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575741 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575831 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.575930 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.576031 
4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.576126 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.576234 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.576331 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.576437 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.576539 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.577977 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578337 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578445 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578558 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578632 4852 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578773 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578878 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578982 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579083 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579176 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579281 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579386 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579487 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579767 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod 
\"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579888 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.579993 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmq5z\" (UniqueName: \"kubernetes.io/projected/840b2302-f5bc-46f5-b645-9a759fe39d4d-kube-api-access-kmq5z\") pod \"node-resolver-6grc8\" (UID: \"840b2302-f5bc-46f5-b645-9a759fe39d4d\") " pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ac5c30fd-7234-4f76-8005-c86304d0d94a-host\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580182 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580272 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580372 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580467 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580564 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580685 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580791 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ac5c30fd-7234-4f76-8005-c86304d0d94a-serviceca\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580888 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv6bz\" (UniqueName: \"kubernetes.io/projected/ac5c30fd-7234-4f76-8005-c86304d0d94a-kube-api-access-hv6bz\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.580979 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581091 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581203 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581310 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581413 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581509 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581618 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/840b2302-f5bc-46f5-b645-9a759fe39d4d-hosts-file\") pod \"node-resolver-6grc8\" (UID: \"840b2302-f5bc-46f5-b645-9a759fe39d4d\") " pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581784 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581869 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.581946 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582031 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582105 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582182 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582259 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582335 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582410 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582495 4852 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582573 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 
10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582723 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.582811 4852 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587473 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587850 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587948 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588033 4852 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588176 4852 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588260 4852 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588503 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588577 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588681 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588746 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588811 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588871 4852 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588930 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588988 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589081 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589112 4852 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589130 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589142 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593378 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.578797 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.583315 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.583906 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.584796 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.584811 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.585111 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.585345 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.586049 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.586902 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587123 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587281 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587409 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587658 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). 
InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587794 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587966 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588075 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588339 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588502 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588566 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588688 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599980 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.588983 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.587329 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589659 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600097 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600136 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589658 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600192 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.589825 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.590719 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.590364 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.592823 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.592993 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593020 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593030 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593344 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593642 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593677 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593805 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593947 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594004 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594016 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594229 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594276 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594562 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.593520 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594819 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594884 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.594988 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.595029 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.595497 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.595677 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.596179 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.596602 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.596774 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.596910 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.596851 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597027 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597248 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597430 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597458 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597595 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597769 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600598 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597870 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.597914 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598152 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598200 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598350 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600643 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598369 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598397 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598663 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599102 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.598769 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599268 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599403 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599504 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599740 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.599755 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.591392 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600693 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.600822 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.600834 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.601049 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.590005 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.601574 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.601861 4852 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.601877 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.601921 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602157 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602274 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.602414 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:04.101679613 +0000 UTC m=+21.319010877 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602501 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602610 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602660 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602665 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602880 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602830 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.602931 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.603226 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.603442 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.603675 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.603691 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.604445 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.604527 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.604746 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.604917 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.605067 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.605384 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.605346 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.605129 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.605507 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:04.105481676 +0000 UTC m=+21.322812920 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.614263 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.614282 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.614526 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.614813 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.615011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.615039 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.615231 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.615071 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.615159 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.616023 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.615021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.616428 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.616510 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.616769 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.618955 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.619199 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619366 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619377 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619419 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619437 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619515 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:04.119492559 +0000 UTC m=+21.336823703 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619386 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619554 4852 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:03 crc kubenswrapper[4852]: E0129 10:42:03.619604 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:04.119575341 +0000 UTC m=+21.336906475 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.619040 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.624847 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.624865 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626423 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626455 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626734 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626812 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626827 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626808 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626844 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.626879 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627198 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627323 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627330 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627475 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627575 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627691 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627699 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.627767 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.628107 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.628096 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.628348 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.628440 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.629614 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.631922 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.632046 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.632523 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.632877 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.633004 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.633140 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.634093 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.634141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.634255 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.634314 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.634741 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.635302 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.635510 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.635708 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.635746 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.636374 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.636386 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.636608 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.637078 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.637096 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.637369 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.637793 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.643589 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.645754 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.650765 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.657942 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.670377 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.677337 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689780 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/840b2302-f5bc-46f5-b645-9a759fe39d4d-hosts-file\") pod \"node-resolver-6grc8\" (UID: \"840b2302-f5bc-46f5-b645-9a759fe39d4d\") " pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689818 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmq5z\" (UniqueName: \"kubernetes.io/projected/840b2302-f5bc-46f5-b645-9a759fe39d4d-kube-api-access-kmq5z\") pod \"node-resolver-6grc8\" (UID: \"840b2302-f5bc-46f5-b645-9a759fe39d4d\") " pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689834 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: 
\"kubernetes.io/host-path/ac5c30fd-7234-4f76-8005-c86304d0d94a-host\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689848 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689874 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ac5c30fd-7234-4f76-8005-c86304d0d94a-serviceca\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689887 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv6bz\" (UniqueName: \"kubernetes.io/projected/ac5c30fd-7234-4f76-8005-c86304d0d94a-kube-api-access-hv6bz\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689900 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689941 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689950 4852 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689960 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689968 4852 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689976 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689984 4852 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689991 4852 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.689999 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690007 4852 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690015 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690024 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690033 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690040 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690048 4852 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690056 4852 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690064 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690071 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690079 4852 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690087 4852 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690095 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: 
\"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690104 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690111 4852 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690119 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690127 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690134 4852 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690142 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690149 4852 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690157 4852 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690165 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690173 4852 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690182 4852 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690190 4852 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690198 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: 
\"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690206 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690214 4852 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690222 4852 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690230 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690237 4852 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690244 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690252 4852 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690260 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690269 4852 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690277 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690284 4852 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690291 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690299 4852 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690307 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690314 4852 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690322 4852 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690331 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690340 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690348 4852 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690356 4852 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690364 4852 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690372 4852 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690380 4852 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690388 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690395 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690403 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: 
\"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690411 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690419 4852 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690427 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690435 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690442 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690449 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690466 4852 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690474 4852 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690481 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690489 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690497 4852 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690506 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690513 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690521 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690529 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690537 4852 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690546 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690553 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690561 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690568 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690599 4852 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690608 4852 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690617 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690625 4852 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690632 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690640 4852 reconciler_common.go:293] "Volume detached for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690648 4852 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690656 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690663 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690671 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690679 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690687 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690694 4852 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690702 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690710 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690718 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690725 4852 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690734 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690741 4852 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690749 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690757 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690765 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690773 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690782 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690790 4852 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690798 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690807 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690815 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690822 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690830 4852 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690838 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690845 4852 reconciler_common.go:293] 
"Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690854 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690862 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690871 4852 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690879 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690887 4852 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690895 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690903 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690911 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690919 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690927 4852 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690935 4852 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690945 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: 
I0129 10:42:03.690953 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690961 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690969 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690977 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690986 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.690993 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691001 4852 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691008 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691016 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691025 4852 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691033 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691041 4852 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691049 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691057 4852 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691065 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691073 4852 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691081 4852 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691088 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691097 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691105 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691113 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691126 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691133 4852 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691142 4852 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691149 4852 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691157 4852 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691167 4852 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691175 4852 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691185 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691193 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691201 4852 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691208 4852 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691216 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691224 4852 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691231 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691239 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691246 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691254 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691262 4852 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691271 4852 reconciler_common.go:293] "Volume detached 
for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691307 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691351 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/840b2302-f5bc-46f5-b645-9a759fe39d4d-hosts-file\") pod \"node-resolver-6grc8\" (UID: \"840b2302-f5bc-46f5-b645-9a759fe39d4d\") " pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691564 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ac5c30fd-7234-4f76-8005-c86304d0d94a-host\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.691718 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.692474 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ac5c30fd-7234-4f76-8005-c86304d0d94a-serviceca\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.697839 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.713197 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv6bz\" (UniqueName: \"kubernetes.io/projected/ac5c30fd-7234-4f76-8005-c86304d0d94a-kube-api-access-hv6bz\") pod \"node-ca-r27t7\" (UID: \"ac5c30fd-7234-4f76-8005-c86304d0d94a\") " pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.722237 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmq5z\" (UniqueName: \"kubernetes.io/projected/840b2302-f5bc-46f5-b645-9a759fe39d4d-kube-api-access-kmq5z\") pod \"node-resolver-6grc8\" (UID: \"840b2302-f5bc-46f5-b645-9a759fe39d4d\") " pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.755788 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-r27t7" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.763960 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.781519 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6grc8" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.789853 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 10:42:03 crc kubenswrapper[4852]: I0129 10:42:03.801499 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.857095 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-0dff9e69a0d270f9bd08fe84cb4da6f21e382edd18926e7ad3f4350de6783893 WatchSource:0}: Error finding container 0dff9e69a0d270f9bd08fe84cb4da6f21e382edd18926e7ad3f4350de6783893: Status 404 returned error can't find the container with id 0dff9e69a0d270f9bd08fe84cb4da6f21e382edd18926e7ad3f4350de6783893 Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.858963 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-87c9258e05fa9520f479bfd496732dcd2f885e8da3d76b7e9de110bb609a413c WatchSource:0}: Error finding container 87c9258e05fa9520f479bfd496732dcd2f885e8da3d76b7e9de110bb609a413c: Status 404 returned error can't find the container with id 87c9258e05fa9520f479bfd496732dcd2f885e8da3d76b7e9de110bb609a413c Jan 29 10:42:03 crc kubenswrapper[4852]: W0129 10:42:03.860110 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod840b2302_f5bc_46f5_b645_9a759fe39d4d.slice/crio-4e8e44f8a73ece2db7d2ac6ee5301359c3c455116d636feb0a58080bad49510c WatchSource:0}: Error finding container 4e8e44f8a73ece2db7d2ac6ee5301359c3c455116d636feb0a58080bad49510c: Status 404 returned error can't find the container with id 4e8e44f8a73ece2db7d2ac6ee5301359c3c455116d636feb0a58080bad49510c Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.031781 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-mclx7"] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.032613 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-zdz6d"] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.032863 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.033213 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.034909 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.035005 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-sd8vh"] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.035100 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.035674 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.035731 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.035851 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.035937 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.036003 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.036664 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.036859 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.037014 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.040547 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.040611 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.040707 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.044884 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.062132 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.078653 4852 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.095933 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.103561 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.103704 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:42:05.103680116 +0000 UTC m=+22.321011250 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.103749 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.103876 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.103947 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:05.103927653 +0000 UTC m=+22.321258787 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.109087 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.121893 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.133425 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.145864 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.155100 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.158740 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-29 10:37:03 +0000 UTC, rotation deadline is 2026-10-18 21:43:11.129074356 +0000 UTC Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.158792 4852 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6299h1m6.970285231s for next certificate rotation Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.163340 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.173004 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.181904 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.188907 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 
29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.198321 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.198394 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204276 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-netns\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204310 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/23a48459-954c-4e1a-bd79-bc6018bc255f-proxy-tls\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204333 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204355 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/23a48459-954c-4e1a-bd79-bc6018bc255f-mcd-auth-proxy-config\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-os-release\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204386 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-kubelet\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204403 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msh5n\" (UniqueName: \"kubernetes.io/projected/874aced6-eac8-456a-8d96-f2ab970a5989-kube-api-access-msh5n\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204419 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-system-cni-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204448 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjdv5\" (UniqueName: \"kubernetes.io/projected/80701ea9-a994-4a9f-8291-e3e40decfeda-kube-api-access-bjdv5\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204478 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204501 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204513 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] 
Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204560 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:05.204542404 +0000 UTC m=+22.421873768 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204633 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204694 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-k8s-cni-cncf-io\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204830 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-daemon-config\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204876 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204900 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/874aced6-eac8-456a-8d96-f2ab970a5989-cni-binary-copy\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204922 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/874aced6-eac8-456a-8d96-f2ab970a5989-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204943 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/80701ea9-a994-4a9f-8291-e3e40decfeda-cni-binary-copy\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204968 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-socket-dir-parent\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204973 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204988 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.204995 4852 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.204995 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-cni-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.205020 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:05.205013546 +0000 UTC m=+22.422344680 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205042 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-cnibin\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205076 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvrzw\" (UniqueName: \"kubernetes.io/projected/23a48459-954c-4e1a-bd79-bc6018bc255f-kube-api-access-lvrzw\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205098 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-system-cni-dir\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205124 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-os-release\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205148 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-cni-bin\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205174 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-conf-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205197 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-etc-kubernetes\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205230 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-cnibin\") pod \"multus-additional-cni-plugins-mclx7\" (UID: 
\"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205252 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-multus-certs\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205271 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-hostroot\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205302 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205329 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/23a48459-954c-4e1a-bd79-bc6018bc255f-rootfs\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205350 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.205370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-cni-multus\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.205419 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: E0129 10:42:04.205475 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:05.205460567 +0000 UTC m=+22.422791701 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.206032 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.218391 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9
b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.222240 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.229143 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.237561 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.248077 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.256486 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.267007 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.277890 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.286955 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.296452 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306568 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-system-cni-dir\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306645 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-os-release\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306678 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-cni-bin\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306705 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-conf-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306727 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-etc-kubernetes\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306750 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-lvrzw\" (UniqueName: \"kubernetes.io/projected/23a48459-954c-4e1a-bd79-bc6018bc255f-kube-api-access-lvrzw\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306776 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-cnibin\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306677 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-system-cni-dir\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306802 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-multus-certs\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306777 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-conf-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306752 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-cni-bin\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306852 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/23a48459-954c-4e1a-bd79-bc6018bc255f-rootfs\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306860 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-os-release\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306869 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306896 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-multus-certs\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306896 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-etc-kubernetes\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306929 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-cni-multus\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306911 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-cni-multus\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306941 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-cnibin\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.306966 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-hostroot\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307002 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/23a48459-954c-4e1a-bd79-bc6018bc255f-rootfs\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307014 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-netns\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307029 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-hostroot\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307071 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/23a48459-954c-4e1a-bd79-bc6018bc255f-proxy-tls\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307098 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/23a48459-954c-4e1a-bd79-bc6018bc255f-mcd-auth-proxy-config\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307119 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-os-release\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307128 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-netns\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307151 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-kubelet\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307178 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msh5n\" (UniqueName: \"kubernetes.io/projected/874aced6-eac8-456a-8d96-f2ab970a5989-kube-api-access-msh5n\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307198 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjdv5\" (UniqueName: \"kubernetes.io/projected/80701ea9-a994-4a9f-8291-e3e40decfeda-kube-api-access-bjdv5\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307218 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-system-cni-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307240 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-k8s-cni-cncf-io\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307262 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-daemon-config\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 
10:42:04.307334 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/874aced6-eac8-456a-8d96-f2ab970a5989-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307357 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/80701ea9-a994-4a9f-8291-e3e40decfeda-cni-binary-copy\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-socket-dir-parent\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307408 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/874aced6-eac8-456a-8d96-f2ab970a5989-cni-binary-copy\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307429 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-cni-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307447 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-cnibin\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307513 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-cnibin\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307533 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/874aced6-eac8-456a-8d96-f2ab970a5989-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307610 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-system-cni-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307217 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-os-release\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307675 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-run-k8s-cni-cncf-io\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307860 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/23a48459-954c-4e1a-bd79-bc6018bc255f-mcd-auth-proxy-config\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308204 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/874aced6-eac8-456a-8d96-f2ab970a5989-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.307531 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-host-var-lib-kubelet\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308421 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-cni-dir\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308437 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/874aced6-eac8-456a-8d96-f2ab970a5989-cni-binary-copy\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308469 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-socket-dir-parent\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308665 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/80701ea9-a994-4a9f-8291-e3e40decfeda-multus-daemon-config\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308689 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/80701ea9-a994-4a9f-8291-e3e40decfeda-cni-binary-copy\") pod \"multus-sd8vh\" (UID: 
\"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.308915 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"na
me\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.310431 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/23a48459-954c-4e1a-bd79-bc6018bc255f-proxy-tls\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.316993 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.324427 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvrzw\" (UniqueName: \"kubernetes.io/projected/23a48459-954c-4e1a-bd79-bc6018bc255f-kube-api-access-lvrzw\") pod \"machine-config-daemon-zdz6d\" (UID: \"23a48459-954c-4e1a-bd79-bc6018bc255f\") " pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.325939 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjdv5\" (UniqueName: \"kubernetes.io/projected/80701ea9-a994-4a9f-8291-e3e40decfeda-kube-api-access-bjdv5\") pod \"multus-sd8vh\" (UID: \"80701ea9-a994-4a9f-8291-e3e40decfeda\") " pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.326552 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msh5n\" (UniqueName: \"kubernetes.io/projected/874aced6-eac8-456a-8d96-f2ab970a5989-kube-api-access-msh5n\") pod \"multus-additional-cni-plugins-mclx7\" (UID: \"874aced6-eac8-456a-8d96-f2ab970a5989\") " pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.327508 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.333328 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.342498 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.350239 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.360489 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.368380 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.375210 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.386343 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-22xhj"] Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.387742 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.389825 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.389935 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.390016 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.390238 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.390333 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.402461 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.420959 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 10:38:38.792795317 +0000 UTC Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.422241 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.443596 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.448364 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.449528 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-mclx7" Jan 29 10:42:04 crc kubenswrapper[4852]: W0129 10:42:04.453346 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23a48459_954c_4e1a_bd79_bc6018bc255f.slice/crio-79052bbbf7505e2d3ca1da6621c1226d900a474ce4006e75001f358e362bd127 WatchSource:0}: Error finding container 79052bbbf7505e2d3ca1da6621c1226d900a474ce4006e75001f358e362bd127: Status 404 returned error can't find the container with id 79052bbbf7505e2d3ca1da6621c1226d900a474ce4006e75001f358e362bd127 Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.457442 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-sd8vh" Jan 29 10:42:04 crc kubenswrapper[4852]: W0129 10:42:04.459651 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod874aced6_eac8_456a_8d96_f2ab970a5989.slice/crio-56aa7a86d7985cda1156b1e34732cc916ec73d80480f7d9865a7b77d477e8d98 WatchSource:0}: Error finding container 56aa7a86d7985cda1156b1e34732cc916ec73d80480f7d9865a7b77d477e8d98: Status 404 returned error can't find the container with id 56aa7a86d7985cda1156b1e34732cc916ec73d80480f7d9865a7b77d477e8d98 Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.492697 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509548 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-netns\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 
10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509629 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-kubelet\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509653 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-slash\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509681 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-etc-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509702 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-ovn\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509725 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-netd\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509746 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-config\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509774 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovn-node-metrics-cert\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509795 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-env-overrides\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509815 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-systemd\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509833 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-bin\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509858 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsng4\" (UniqueName: \"kubernetes.io/projected/2e44156c-fa1a-4edf-a317-e63b96f7aae4-kube-api-access-qsng4\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509879 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-log-socket\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509899 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-var-lib-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509915 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-ovn-kubernetes\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.509952 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.510022 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-script-lib\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.510114 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-node-log\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.510136 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.510168 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-systemd-units\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.528685 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.567750 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.602316 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"87c9258e05fa9520f479bfd496732dcd2f885e8da3d76b7e9de110bb609a413c"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.603993 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"33a1cc0a0c575fa64182944306de426f066a9c6c559865373199450b2336e46a"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.605303 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"79052bbbf7505e2d3ca1da6621c1226d900a474ce4006e75001f358e362bd127"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.606745 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0dff9e69a0d270f9bd08fe84cb4da6f21e382edd18926e7ad3f4350de6783893"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.608217 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/node-ca-r27t7" event={"ID":"ac5c30fd-7234-4f76-8005-c86304d0d94a","Type":"ContainerStarted","Data":"cb0af13537e3001d998a581b46e7b1e4fda959714eb4cccd8c78ad2e02b7b4b7"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611307 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-netns\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611474 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-kubelet\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611553 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-kubelet\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611479 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-netns\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611745 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-slash\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611862 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-etc-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611973 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-ovn\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612067 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-ovn\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611869 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-slash\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.611910 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-etc-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612203 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-netd\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612077 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-netd\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612374 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-config\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612368 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612491 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovn-node-metrics-cert\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612672 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-env-overrides\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612716 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-systemd\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612752 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-bin\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612794 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsng4\" (UniqueName: \"kubernetes.io/projected/2e44156c-fa1a-4edf-a317-e63b96f7aae4-kube-api-access-qsng4\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612838 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-log-socket\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612883 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-var-lib-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612920 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-ovn-kubernetes\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612846 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-bin\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612797 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-systemd\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.612985 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613057 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-log-socket\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613079 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-script-lib\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613147 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-node-log\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613159 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerStarted","Data":"56aa7a86d7985cda1156b1e34732cc916ec73d80480f7d9865a7b77d477e8d98"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613183 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613251 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613252 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-env-overrides\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613286 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-systemd-units\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613327 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-node-log\") pod \"ovnkube-node-22xhj\" (UID: 
\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613300 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-var-lib-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613327 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-ovn-kubernetes\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613330 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-openvswitch\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613295 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-config\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.613385 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-systemd-units\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.614304 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-script-lib\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.615051 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6grc8" event={"ID":"840b2302-f5bc-46f5-b645-9a759fe39d4d","Type":"ContainerStarted","Data":"4e8e44f8a73ece2db7d2ac6ee5301359c3c455116d636feb0a58080bad49510c"} Jan 29 10:42:04 crc kubenswrapper[4852]: I0129 10:42:04.616791 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovn-node-metrics-cert\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.118650 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.118834 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.119062 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.119075 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:42:07.119028431 +0000 UTC m=+24.336359565 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.119214 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:07.119178755 +0000 UTC m=+24.336509889 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.220047 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.220095 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.220138 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220281 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220302 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220298 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220301 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220400 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220414 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220460 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-29 10:42:07.220432972 +0000 UTC m=+24.437764096 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220315 4852 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220530 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:07.220509274 +0000 UTC m=+24.437840618 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.220627 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:07.220601346 +0000 UTC m=+24.437932580 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.640165 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 06:21:56.670060329 +0000 UTC Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.641570 4852 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.642770 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.642811 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.643049 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.643163 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.643351 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:05 crc kubenswrapper[4852]: E0129 10:42:05.643512 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:05 crc kubenswrapper[4852]: W0129 10:42:05.650970 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80701ea9_a994_4a9f_8291_e3e40decfeda.slice/crio-be96755d8cf148736dfa121e51ef452eb63d65ca0b0fdfd6966660e9be717c4d WatchSource:0}: Error finding container be96755d8cf148736dfa121e51ef452eb63d65ca0b0fdfd6966660e9be717c4d: Status 404 returned error can't find the container with id be96755d8cf148736dfa121e51ef452eb63d65ca0b0fdfd6966660e9be717c4d Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.661739 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.662673 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.666457 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.674158 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsng4\" (UniqueName: \"kubernetes.io/projected/2e44156c-fa1a-4edf-a317-e63b96f7aae4-kube-api-access-qsng4\") pod \"ovnkube-node-22xhj\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.675738 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.676596 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.677979 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.678819 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.680108 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.680690 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" 
path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.681780 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.682283 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.683539 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.684275 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.685370 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.685933 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.686436 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.687361 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.688031 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.688811 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.689356 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.690343 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.691962 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.692505 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" 
path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.692943 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.694544 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.695087 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.696768 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.697686 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.699847 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.699874 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.700560 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.707709 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.708281 4852 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.708385 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.710391 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.711640 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.712096 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.713942 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.715036 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.715570 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" 
Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.716553 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.717233 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.718182 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.718851 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.719872 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.720518 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.721560 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.722154 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.723268 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.724835 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.725857 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.726331 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.727215 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.727775 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 
10:42:05.728403 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.729422 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.741912 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\
\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.823166 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.837349 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.868026 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.892173 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cer
t-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.900029 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:05 crc kubenswrapper[4852]: I0129 10:42:05.906143 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:05 crc kubenswrapper[4852]: W0129 10:42:05.968931 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e44156c_fa1a_4edf_a317_e63b96f7aae4.slice/crio-636f954c5f1d9e5c4fe326369f1fffb1156d9c3c44fe05eda3d122d77ceb688c WatchSource:0}: Error finding container 636f954c5f1d9e5c4fe326369f1fffb1156d9c3c44fe05eda3d122d77ceb688c: Status 404 returned error can't find the container with id 636f954c5f1d9e5c4fe326369f1fffb1156d9c3c44fe05eda3d122d77ceb688c Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.641341 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 21:58:57.458388869 +0000 UTC Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.665656 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6grc8" event={"ID":"840b2302-f5bc-46f5-b645-9a759fe39d4d","Type":"ContainerStarted","Data":"3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.667705 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerStarted","Data":"95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.667772 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerStarted","Data":"be96755d8cf148736dfa121e51ef452eb63d65ca0b0fdfd6966660e9be717c4d"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.669577 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.670036 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.671950 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e" exitCode=255 Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.672005 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.672037 4852 scope.go:117] "RemoveContainer" containerID="55f723d10d30c612432ad7ed9cdd31079264832fd2fc25f9ccecbef8a0c13077" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.674438 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.674947 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.675967 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-r27t7" event={"ID":"ac5c30fd-7234-4f76-8005-c86304d0d94a","Type":"ContainerStarted","Data":"fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.679558 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.680171 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.681673 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.681739 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.685807 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerStarted","Data":"410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.688308 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.689176 4852 
generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" exitCode=0 Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.689262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.689297 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"636f954c5f1d9e5c4fe326369f1fffb1156d9c3c44fe05eda3d122d77ceb688c"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.690204 4852 scope.go:117] "RemoveContainer" containerID="5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e" Jan 29 10:42:06 crc kubenswrapper[4852]: E0129 10:42:06.690415 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.691000 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba"} Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.697041 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.708914 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.719398 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.733046 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.744256 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.757865 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.768479 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.783254 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.791213 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.804625 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.814689 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.826016 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cer
t-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.836535 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.852161 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"
,\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\
\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"1
92.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.861337 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2c
c863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.872199 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://55f723d10d30c612432ad7ed9cdd31079264832fd2fc25f9ccecbef8a0c13077\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:41:57Z\\\",\\\"message\\\":\\\"W0129 10:41:46.986175 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 10:41:47.041782 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769683307 cert, and key in /tmp/serving-cert-537503352/serving-signer.crt, /tmp/serving-cert-537503352/serving-signer.key\\\\nI0129 10:41:47.363764 1 observer_polling.go:159] Starting file observer\\\\nW0129 10:41:47.369327 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 10:41:47.369646 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:41:47.371852 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-537503352/tls.crt::/tmp/serving-cert-537503352/tls.key\\\\\\\"\\\\nF0129 10:41:57.661811 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.880137 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.889899 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.898868 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.909219 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.920228 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.929882 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.939864 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.946630 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.955912 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:06 crc kubenswrapper[4852]: I0129 10:42:06.967528 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b02
9bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.161005 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.161124 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.161352 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:42:11.161322124 +0000 UTC m=+28.378653268 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.161418 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.161469 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-29 10:42:11.161460237 +0000 UTC m=+28.378791381 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.261564 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.261684 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.261717 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261730 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261791 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261796 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:11.261779692 +0000 UTC m=+28.479110836 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261803 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261813 4852 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261841 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:11.261831994 +0000 UTC m=+28.479163118 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261961 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.261999 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.262015 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.262083 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:11.262063929 +0000 UTC m=+28.479395083 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.462666 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.462750 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.462793 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.462663 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.462883 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.462980 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.641863 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 08:52:11.967018673 +0000 UTC Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.695184 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.697747 4852 scope.go:117] "RemoveContainer" containerID="5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e" Jan 29 10:42:07 crc kubenswrapper[4852]: E0129 10:42:07.697923 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.704842 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.704877 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.704890 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.704898 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.704906 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.704914 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.706206 4852 generic.go:334] "Generic (PLEG): container finished" podID="874aced6-eac8-456a-8d96-f2ab970a5989" containerID="410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf" exitCode=0 Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.706287 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerDied","Data":"410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf"} Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.729402 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773
257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/
log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\
\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.747220 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.763625 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.778923 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.790874 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.802375 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.818569 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.854130 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.887616 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.906040 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.918369 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.930825 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.942836 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.956753 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.968435 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.989996 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:07 crc kubenswrapper[4852]: I0129 10:42:07.999836 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:07Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.011944 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.023997 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.037246 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.059635 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.076137 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.088227 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.099266 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.111102 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.124459 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.138983 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.153311 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.642527 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 18:47:34.929847758 +0000 UTC Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.710685 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerStarted","Data":"3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2"} Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.724457 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.737976 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.751575 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.763978 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.774240 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.786390 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.796141 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.810960 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-
dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.828674 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z 
is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.840232 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.852071 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.861981 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.874231 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:08 crc kubenswrapper[4852]: I0129 10:42:08.887731 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:08Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.449616 4852 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.454173 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.454221 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.454231 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.454356 4852 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.460296 4852 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.460600 4852 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.461631 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.461665 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.461677 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.461701 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.461714 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.462970 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.462987 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.463070 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.463146 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.463180 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.463244 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.479656 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.484310 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.484345 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.484359 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.484376 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.484387 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.498603 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.501951 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.501993 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.502003 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.502019 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.502030 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.516966 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.520532 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.520560 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.520571 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.520605 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.520615 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.532228 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.535436 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.535470 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.535480 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.535492 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.535501 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.555345 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: E0129 10:42:09.555494 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.557880 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.557913 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.557927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.557944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.557956 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.643302 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 21:45:07.012017142 +0000 UTC Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.660824 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.660874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.660889 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.660909 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.660926 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.716679 4852 generic.go:334] "Generic (PLEG): container finished" podID="874aced6-eac8-456a-8d96-f2ab970a5989" containerID="3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2" exitCode=0 Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.716778 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerDied","Data":"3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2"} Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.734659 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-conf
ig\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.758925 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z 
is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.764265 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.764305 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.764319 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.764339 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.764368 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.779556 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.797157 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.810457 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.826518 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.839929 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.851194 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.864438 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.867496 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.867527 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.867536 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.867550 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.867559 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.875655 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.888771 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.898800 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.911847 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.924730 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:09Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.970047 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.970094 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.970104 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.970119 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:09 crc kubenswrapper[4852]: I0129 10:42:09.970130 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:09Z","lastTransitionTime":"2026-01-29T10:42:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.073328 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.073377 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.073388 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.073406 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.073420 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.176979 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.177015 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.177024 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.177039 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.177048 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.280009 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.280045 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.280054 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.280068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.280077 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.382712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.382776 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.382798 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.382827 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.382850 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.485744 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.485808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.485827 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.485852 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.485873 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.589093 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.589151 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.589163 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.589185 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.589199 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.644848 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 01:42:48.36519263 +0000 UTC Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.693075 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.693127 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.693145 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.693171 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.693190 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.728357 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.732822 4852 generic.go:334] "Generic (PLEG): container finished" podID="874aced6-eac8-456a-8d96-f2ab970a5989" containerID="092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45" exitCode=0 Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.732866 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerDied","Data":"092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.749099 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.769436 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.783827 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.795548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.795627 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.795643 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.795661 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.795675 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.799150 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.815413 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.828561 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.839362 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.852185 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.873105 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.889907 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.898681 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.898725 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.898739 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.898758 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.898773 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:10Z","lastTransitionTime":"2026-01-29T10:42:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.903698 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.918553 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.938793 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:10 crc kubenswrapper[4852]: I0129 10:42:10.950949 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:10Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.001128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.001155 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.001164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.001179 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.001216 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.103783 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.103828 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.103839 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.103859 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.103870 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.195237 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.195457 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.195625 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.195825 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:42:19.195790824 +0000 UTC m=+36.413122128 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.195961 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-29 10:42:19.195931157 +0000 UTC m=+36.413262291 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.206686 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.206802 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.206866 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.206942 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.207044 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.296020 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.296360 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.296541 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.296258 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.296853 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.296973 4852 projected.go:194] Error preparing data for projected volume 
kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.297131 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:19.297108983 +0000 UTC m=+36.514440147 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.296491 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.297392 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:19.297377099 +0000 UTC m=+36.514708243 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.296678 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.297634 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.297744 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.297909 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:19.297893242 +0000 UTC m=+36.515224386 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.309278 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.309318 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.309327 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.309341 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.309350 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.411527 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.411622 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.411640 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.411664 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.411681 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.462522 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.462690 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.462725 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.462767 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.462883 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:11 crc kubenswrapper[4852]: E0129 10:42:11.462988 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.514408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.514446 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.514459 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.514473 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.514485 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.618005 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.618366 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.618514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.618781 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.619001 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.647215 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 08:14:38.420684155 +0000 UTC Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.722782 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.722838 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.722855 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.722877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.722894 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.740934 4852 generic.go:334] "Generic (PLEG): container finished" podID="874aced6-eac8-456a-8d96-f2ab970a5989" containerID="ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5" exitCode=0 Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.741357 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerDied","Data":"ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.764012 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.782985 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.797347 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.817526 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.825210 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.825343 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.825443 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.825530 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.825614 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.833341 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.851082 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-relea
se\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":
{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.866524 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.884665 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.898348 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.910786 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.923606 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.928140 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.928165 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.928175 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.928189 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.928198 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:11Z","lastTransitionTime":"2026-01-29T10:42:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.934877 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.947302 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:11 crc kubenswrapper[4852]: I0129 10:42:11.961802 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:11Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.031953 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.032018 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.032029 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.032065 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.032076 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.135469 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.135520 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.135535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.135609 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.135624 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.238216 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.238276 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.238290 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.238308 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.238321 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.340837 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.340874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.340882 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.340898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.340908 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.442481 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.442514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.442521 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.442534 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.442542 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.545407 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.545454 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.545464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.545481 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.545492 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.615513 4852 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.647429 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 05:14:12.678924386 +0000 UTC Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.648985 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.649021 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.649031 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.649046 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.649056 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.748509 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.748788 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.753385 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.753468 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.753491 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.753531 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.753550 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.755467 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerStarted","Data":"7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.764510 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mo
untPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.780650 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8a
ecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.791546 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.804826 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.839592 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.852278 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.852630 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.855525 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.855555 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.855563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.855575 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.855601 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.865566 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.878670 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.890395 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.902651 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.915291 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.931265 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.945065 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d
5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.958135 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.958177 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.958186 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.958204 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.958214 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:12Z","lastTransitionTime":"2026-01-29T10:42:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.959061 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.973915 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.984510 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:12 crc kubenswrapper[4852]: I0129 10:42:12.996131 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:12Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.010895 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.021909 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.036897 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.054650 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8a
ecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.060446 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.060481 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.060495 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.060509 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.060518 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.066749 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.076876 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.087780 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.097940 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.109435 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.122238 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.135009 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.163176 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.163222 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.163233 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.163251 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.163270 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.228601 4852 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.265823 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.265860 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.265868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.265884 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.265895 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.367856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.367907 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.367915 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.367928 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.367939 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.465379 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.465659 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:13 crc kubenswrapper[4852]: E0129 10:42:13.465773 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:13 crc kubenswrapper[4852]: E0129 10:42:13.465875 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.465664 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:13 crc kubenswrapper[4852]: E0129 10:42:13.466195 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.469877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.469913 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.469925 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.469942 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.469954 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.478547 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.490919 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.501662 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.510704 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.522254 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.533073 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.550531 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.561435 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"na
me\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.571845 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.571878 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.571890 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.571908 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.571920 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.578485 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.598999 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.609059 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.619632 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.631352 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.643246 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.648361 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 16:00:08.734487647 +0000 UTC Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.674103 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.674138 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.674147 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.674161 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.674170 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.758719 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.758798 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.776061 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.776091 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.776100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.776113 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.776125 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.788713 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.799358 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.809127 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.825025 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.838503 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.856173 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.870504 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.874803 4852 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.878863 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.878918 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.878935 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.878969 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.878992 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.883565 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.900947 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785df
e8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.912763 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.924654 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.937044 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.949114 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.964993 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.979391 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.980823 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.980866 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.980876 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.980889 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:13 crc kubenswrapper[4852]: I0129 10:42:13.980898 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:13Z","lastTransitionTime":"2026-01-29T10:42:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.082726 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.082753 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.082761 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.082776 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.082784 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.185804 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.185857 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.185867 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.185894 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.185908 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.287849 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.287884 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.287893 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.287906 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.287915 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.389994 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.390036 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.390045 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.390058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.390069 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.492144 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.492186 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.492197 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.492213 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.492225 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.539672 4852 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.594618 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.594658 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.594667 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.594681 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.594691 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.648489 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 15:37:50.960538727 +0000 UTC Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.697965 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.698297 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.698420 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.698506 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.698619 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.765775 4852 generic.go:334] "Generic (PLEG): container finished" podID="874aced6-eac8-456a-8d96-f2ab970a5989" containerID="7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140" exitCode=0 Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.766548 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerDied","Data":"7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.781389 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-conf
ig\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.803262 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.803326 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.803340 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.803365 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.803378 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.804882 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.841861 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.864918 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.897920 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.906969 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.907259 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.907276 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.907292 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.907303 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:14Z","lastTransitionTime":"2026-01-29T10:42:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.912104 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.925904 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.938763 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.953746 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.966236 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.980103 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:14 crc kubenswrapper[4852]: I0129 10:42:14.993806 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:14Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.007226 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.010703 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.010743 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.010753 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.010769 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.010781 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.019026 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.112366 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.112408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.112416 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.112433 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.112443 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.215079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.215153 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.215168 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.215192 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.215207 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.318514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.318555 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.318565 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.318606 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.318625 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.420689 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.420725 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.420734 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.420748 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.420758 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.462994 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:15 crc kubenswrapper[4852]: E0129 10:42:15.463202 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.463012 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:15 crc kubenswrapper[4852]: E0129 10:42:15.463321 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.463012 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:15 crc kubenswrapper[4852]: E0129 10:42:15.463412 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.523778 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.523839 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.523852 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.523877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.523890 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.627269 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.627312 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.627320 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.627333 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.627342 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.649034 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 22:43:55.407212611 +0000 UTC Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.729918 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.729962 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.729978 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.729995 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.730007 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.771951 4852 generic.go:334] "Generic (PLEG): container finished" podID="874aced6-eac8-456a-8d96-f2ab970a5989" containerID="bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d" exitCode=0 Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.772715 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerDied","Data":"bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.789682 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.806350 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.823228 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.835097 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.835135 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.835144 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.835159 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.835168 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.836047 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.848687 4852 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.860328 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.871135 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.883347 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.891657 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.902149 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.913200 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.926557 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.937906 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.937951 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.937963 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.937978 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.937989 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:15Z","lastTransitionTime":"2026-01-29T10:42:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.942200 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:15 crc kubenswrapper[4852]: I0129 10:42:15.961331 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\
\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:15Z is 
after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.025481 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.026123 4852 scope.go:117] "RemoveContainer" containerID="5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.040667 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.040722 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.040733 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.040750 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.040761 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.144334 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.144368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.144378 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.144396 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.144407 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.247963 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.248010 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.248403 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.248437 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.248653 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.354538 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.354575 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.354612 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.354633 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.354647 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.457256 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.457295 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.457308 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.457323 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.457331 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.559613 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.559666 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.559675 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.559691 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.559700 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.649674 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 06:04:53.944109785 +0000 UTC Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.661868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.662147 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.662332 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.662508 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.662712 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.706556 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v"] Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.707635 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.711870 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.712262 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.732371 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.746322 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.756747 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/31063684-b722-4141-a737-40f04b50f3ff-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.756779 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/31063684-b722-4141-a737-40f04b50f3ff-env-overrides\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.756804 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/31063684-b722-4141-a737-40f04b50f3ff-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.756841 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x488\" (UniqueName: \"kubernetes.io/projected/31063684-b722-4141-a737-40f04b50f3ff-kube-api-access-2x488\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.758459 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.765409 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.765453 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.765465 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 
crc kubenswrapper[4852]: I0129 10:42:16.765481 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.765494 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.771452 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.777153 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" event={"ID":"874aced6-eac8-456a-8d96-f2ab970a5989","Type":"ContainerStarted","Data":"e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.779932 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.781423 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.781833 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.788493 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.803270 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.817341 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.834187 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785df
e8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.845919 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4
b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.857656 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/31063684-b722-4141-a737-40f04b50f3ff-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.857704 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/31063684-b722-4141-a737-40f04b50f3ff-env-overrides\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.857755 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/31063684-b722-4141-a737-40f04b50f3ff-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.857802 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x488\" (UniqueName: \"kubernetes.io/projected/31063684-b722-4141-a737-40f04b50f3ff-kube-api-access-2x488\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.858328 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/31063684-b722-4141-a737-40f04b50f3ff-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.858470 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/31063684-b722-4141-a737-40f04b50f3ff-env-overrides\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.860057 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c30
86a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.864932 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/31063684-b722-4141-a737-40f04b50f3ff-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.872084 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.872114 
4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.872124 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.872141 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.872152 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.873439 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.886823 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.901107 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.902933 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2x488\" (UniqueName: \"kubernetes.io/projected/31063684-b722-4141-a737-40f04b50f3ff-kube-api-access-2x488\") pod \"ovnkube-control-plane-749d76644c-msg8v\" (UID: \"31063684-b722-4141-a737-40f04b50f3ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.912981 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.927723 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.938911 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.959731 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785df
e8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.973935 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.974479 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.974507 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.974518 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.974536 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.974546 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:16Z","lastTransitionTime":"2026-01-29T10:42:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.984652 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:16 crc kubenswrapper[4852]: I0129 10:42:16.998702 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:16Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.016606 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.024506 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" Jan 29 10:42:17 crc kubenswrapper[4852]: W0129 10:42:17.044156 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31063684_b722_4141_a737_40f04b50f3ff.slice/crio-5f58ec8d985541031ef5d19f0437dc6c542a2abd18d79cdaef566ffc1c058ada WatchSource:0}: Error finding container 5f58ec8d985541031ef5d19f0437dc6c542a2abd18d79cdaef566ffc1c058ada: Status 404 returned error can't find the container with id 5f58ec8d985541031ef5d19f0437dc6c542a2abd18d79cdaef566ffc1c058ada Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.047392 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.070740 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.077102 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.077129 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: 
I0129 10:42:17.077136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.077165 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.077173 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.083412 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.102807 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.118159 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.142515 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.154691 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.170484 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.179504 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.179538 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.179548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.179563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.179572 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.180898 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.281231 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.281265 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.281277 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.281291 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.281301 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.383492 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.383517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.383525 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.383538 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.383546 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.465975 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:17 crc kubenswrapper[4852]: E0129 10:42:17.466083 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.466416 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:17 crc kubenswrapper[4852]: E0129 10:42:17.466484 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.466640 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:17 crc kubenswrapper[4852]: E0129 10:42:17.466711 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.486597 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.486628 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.486637 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.486651 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.486662 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.589818 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.589872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.589880 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.589893 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.589902 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.651021 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 05:13:35.767948266 +0000 UTC Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.692329 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.692366 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.692376 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.692393 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.692404 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795407 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" event={"ID":"31063684-b722-4141-a737-40f04b50f3ff","Type":"ContainerStarted","Data":"c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795715 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" event={"ID":"31063684-b722-4141-a737-40f04b50f3ff","Type":"ContainerStarted","Data":"052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795734 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" event={"ID":"31063684-b722-4141-a737-40f04b50f3ff","Type":"ContainerStarted","Data":"5f58ec8d985541031ef5d19f0437dc6c542a2abd18d79cdaef566ffc1c058ada"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795751 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795777 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795788 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795802 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.795812 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.808250 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"1
92.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.823936 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8a
ecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.837382 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.848701 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.860018 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.873527 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.886916 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.898126 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.898167 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.898177 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.898194 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.898212 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:17Z","lastTransitionTime":"2026-01-29T10:42:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.899023 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.912035 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.922440 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.931538 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.938888 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.947905 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.956531 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:17 crc kubenswrapper[4852]: I0129 10:42:17.968148 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:17Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.000632 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.000659 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.000671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.000684 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.000693 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.105793 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.105844 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.105856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.105874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.105890 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.195116 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-bqdnv"] Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.198288 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: E0129 10:42:18.198381 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.209384 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.209419 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.209427 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.209441 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.209452 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.219644 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.232852 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.244675 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.256963 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.272348 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.275135 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.275206 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs2mw\" (UniqueName: \"kubernetes.io/projected/2d44fabb-f3c2-4492-9ab4-567a81928ccc-kube-api-access-cs2mw\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.287699 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.307002 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.311322 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.311360 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.311368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.311384 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.311393 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.319262 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.332121 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/stati
c-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.348491 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.358929 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.370982 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.375591 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs2mw\" (UniqueName: \"kubernetes.io/projected/2d44fabb-f3c2-4492-9ab4-567a81928ccc-kube-api-access-cs2mw\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.375640 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: E0129 10:42:18.375755 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:18 crc kubenswrapper[4852]: E0129 10:42:18.375807 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:42:18.875793902 +0000 UTC m=+36.093125036 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.382958 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.390035 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs2mw\" (UniqueName: \"kubernetes.io/projected/2d44fabb-f3c2-4492-9ab4-567a81928ccc-kube-api-access-cs2mw\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.393288 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0
db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.403187 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.414128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.414164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.414173 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.414188 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.414198 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.416228 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.516458 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.516842 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.517050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.517181 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.517307 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.620214 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.620291 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.620308 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.620338 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.620355 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.651463 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 18:15:56.541927562 +0000 UTC Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.723545 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.723643 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.723656 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.723680 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.723693 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.801665 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/0.log" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.806097 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644" exitCode=1 Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.806179 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.807227 4852 scope.go:117] "RemoveContainer" containerID="28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.824515 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\
":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.827063 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.827091 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.827099 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.827113 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.827123 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.850140 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"Node event handler 2\\\\nI0129 10:42:17.685604 6112 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685645 6112 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685702 6112 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685737 6112 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.686126 6112 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:17.686452 6112 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:17.687231 6112 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:17.687286 6112 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 10:42:17.687330 6112 factory.go:656] Stopping watch factory\\\\nI0129 10:42:17.687366 6112 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:17.687385 6112 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.862443 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.877705 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.880619 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:18 crc kubenswrapper[4852]: E0129 10:42:18.880695 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:18 crc kubenswrapper[4852]: E0129 10:42:18.880743 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:42:19.880727357 +0000 UTC m=+37.098058491 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.891931 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.923874 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.931859 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.931919 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.931950 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.931997 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.932014 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:18Z","lastTransitionTime":"2026-01-29T10:42:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.952956 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.972465 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:18 crc kubenswrapper[4852]: I0129 10:42:18.991611 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:18Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.003740 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.013641 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.026400 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.035206 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.035255 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.035266 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.035284 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.035294 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.038440 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.051889 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 
2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.063730 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.073804 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.138172 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.138224 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.138237 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.138254 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.138263 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.241547 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.241596 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.241605 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.241618 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.241628 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.286697 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.286852 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:42:35.286822413 +0000 UTC m=+52.504153557 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.286954 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.287076 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.287132 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:35.287119501 +0000 UTC m=+52.504450635 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.343951 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.343999 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.344013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.344032 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.344045 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.388115 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.388165 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.388202 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388314 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388331 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388342 4852 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388340 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388392 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:35.388379759 +0000 UTC m=+52.605710893 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388428 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388529 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388457 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:35.38843189 +0000 UTC m=+52.605763064 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.388968 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.389065 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:42:35.389040115 +0000 UTC m=+52.606371319 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.446234 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.446278 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.446290 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.446307 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.446319 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.462754 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.462789 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.462805 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.462754 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.462885 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.462965 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.463006 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.463029 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.550023 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.550087 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.550102 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.550121 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.550132 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.651846 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 00:30:08.150366622 +0000 UTC Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.652174 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.652212 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.652221 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.652235 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.652244 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.736429 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.736467 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.736475 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.736490 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.736500 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.750207 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.753728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.753777 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.753792 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.753808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.753822 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.765765 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.769071 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.769109 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.769117 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.769132 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.769140 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.781307 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.784972 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.785209 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.785220 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.785235 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.785245 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.796929 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.799825 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.799872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.799920 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.799940 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.799951 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.811361 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/0.log" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.813615 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.814327 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.818912 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.819115 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.820685 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.820731 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.820745 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.820763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.820776 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.839009 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6
efdf07e99bdd3e7127f4d065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"Node event handler 2\\\\nI0129 10:42:17.685604 6112 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685645 6112 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685702 6112 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685737 6112 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.686126 6112 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:17.686452 6112 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:17.687231 6112 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:17.687286 6112 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 10:42:17.687330 6112 factory.go:656] Stopping watch factory\\\\nI0129 10:42:17.687366 6112 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:17.687385 6112 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.851961 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.864518 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.877830 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.888832 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.893506 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.893925 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: E0129 10:42:19.893979 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:42:21.89396214 +0000 UTC m=+39.111293274 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.901598 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.916439 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.924037 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.924069 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.924077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.924099 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.924115 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:19Z","lastTransitionTime":"2026-01-29T10:42:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.927689 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.938951 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.951303 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.965232 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.978361 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:19 crc kubenswrapper[4852]: I0129 10:42:19.989929 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:19Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.004452 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.018855 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.026712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.026919 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.026999 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.027086 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.027162 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.041114 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.129545 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.129618 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.129628 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.129645 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.129659 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.232367 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.232408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.232421 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.232437 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.232446 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.334505 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.334592 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.334603 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.334622 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.334631 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.437558 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.437645 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.437660 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.437682 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.437697 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.540504 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.540569 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.540616 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.540641 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.540657 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.642956 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.642997 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.643007 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.643027 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.643038 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.652392 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 02:35:25.796266892 +0000 UTC Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.745171 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.745946 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.745977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.746008 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.746029 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.818316 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/1.log" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.818942 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/0.log" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.821347 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065" exitCode=1 Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.821395 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.821442 4852 scope.go:117] "RemoveContainer" containerID="28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.822040 4852 scope.go:117] "RemoveContainer" containerID="cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065" Jan 29 10:42:20 crc kubenswrapper[4852]: E0129 10:42:20.822230 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.838289 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.848040 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.848089 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.848103 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.848124 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.848138 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.852792 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.865901 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.874979 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.886781 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.898118 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.915344 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.926393 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.939775 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.950979 4852 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.951032 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.951043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.951059 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.951070 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:20Z","lastTransitionTime":"2026-01-29T10:42:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.954738 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.971044 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.984262 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:20 crc kubenswrapper[4852]: I0129 10:42:20.994784 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:20Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.005997 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.016704 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\
\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.033432 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6
efdf07e99bdd3e7127f4d065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28438f2bf9f117bf59dd2124efbf66554bffec8aecaf2989c6651db10b23b644\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"Node event handler 2\\\\nI0129 10:42:17.685604 6112 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685645 6112 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685702 6112 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.685737 6112 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:17.686126 6112 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:17.686452 6112 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:17.687231 6112 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:17.687286 6112 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 10:42:17.687330 6112 factory.go:656] Stopping watch factory\\\\nI0129 10:42:17.687366 6112 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:17.687385 6112 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, 
Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\
"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.052953 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.052988 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.052998 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.053013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.053024 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.155706 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.156053 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.156064 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.156077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.156096 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.258719 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.258771 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.258790 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.258812 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.258824 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.361400 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.361433 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.361443 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.361456 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.361465 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.462396 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.462440 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.462517 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.462413 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.462718 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.463372 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.463568 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.463195 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.464554 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.464816 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.464942 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.465027 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.465113 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.567604 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.567819 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.567927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.568026 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.568085 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.652735 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 16:30:01.905714655 +0000 UTC Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.670714 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.670927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.670983 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.671191 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.671252 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.773798 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.773844 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.773854 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.773872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.773883 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.825540 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/1.log" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.829074 4852 scope.go:117] "RemoveContainer" containerID="cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065" Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.829336 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.844069 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.856068 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.869655 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.877182 4852 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.877238 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.877251 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.877278 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.877295 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.884200 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.898729 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.911914 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.916252 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.916544 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:21 crc kubenswrapper[4852]: E0129 10:42:21.916679 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:42:25.916655824 +0000 UTC m=+43.133986968 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.922144 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.934519 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.946662 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.963934 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785df
e8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] 
failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.978513 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.980535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.980564 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.980592 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.980613 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.980625 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:21Z","lastTransitionTime":"2026-01-29T10:42:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:21 crc kubenswrapper[4852]: I0129 10:42:21.991803 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:21Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.003445 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:22Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.012132 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:22Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.022258 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:22Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.033721 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:22Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.082387 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.082430 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.082441 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.082455 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.082464 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.184839 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.184871 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.184883 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.184898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.184909 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.286678 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.286771 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.286784 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.286801 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.286816 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.389696 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.389741 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.389753 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.389771 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.389783 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.492180 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.492226 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.492235 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.492252 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.492261 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.595043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.595079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.595087 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.595101 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.595110 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.653825 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 06:32:40.628332751 +0000 UTC Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.698157 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.698483 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.698728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.698868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.699023 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.801970 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.802006 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.802016 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.802033 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.802044 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.904678 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.904740 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.904758 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.904781 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:22 crc kubenswrapper[4852]: I0129 10:42:22.904802 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:22Z","lastTransitionTime":"2026-01-29T10:42:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.007124 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.007196 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.007212 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.007232 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.007246 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.110002 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.110045 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.110055 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.110070 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.110081 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.212412 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.212442 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.212451 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.212464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.212473 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.315946 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.316324 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.316419 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.316497 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.316566 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.419013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.419048 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.419056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.419073 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.419083 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.462843 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:23 crc kubenswrapper[4852]: E0129 10:42:23.463311 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.463001 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.463143 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.462939 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:23 crc kubenswrapper[4852]: E0129 10:42:23.463933 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:23 crc kubenswrapper[4852]: E0129 10:42:23.463741 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:23 crc kubenswrapper[4852]: E0129 10:42:23.464023 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.479373 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.490156 
4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.501216 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.513109 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.521020 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.521065 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.521081 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.521103 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.521117 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.527532 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.539717 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.552530 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.572256 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at 
address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\
\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.582939 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.594949 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.605403 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.617737 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.622655 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.622688 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.622696 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.622709 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.622717 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.631767 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.642862 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.654630 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 12:02:19.57313169 +0000 UTC Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.655106 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.664783 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:23Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.724837 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.724897 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.724906 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.724921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.724930 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.827387 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.827440 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.827455 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.827476 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.827493 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.930679 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.930725 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.930744 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.930764 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:23 crc kubenswrapper[4852]: I0129 10:42:23.930780 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:23Z","lastTransitionTime":"2026-01-29T10:42:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.034408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.034469 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.034491 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.034517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.034538 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.136894 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.136935 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.136943 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.136957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.136968 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.239468 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.239529 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.239552 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.239614 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.239655 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.342393 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.342444 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.342454 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.342469 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.342481 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.445174 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.445208 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.445218 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.445234 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.445245 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.547929 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.547966 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.547975 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.547989 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.547998 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.650794 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.650838 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.650852 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.650870 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.650881 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.654736 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 04:21:32.068912423 +0000 UTC Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.753269 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.753309 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.753320 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.753336 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.753347 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.857809 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.857850 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.857859 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.857876 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.857887 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.960729 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.960768 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.960776 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.960790 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:24 crc kubenswrapper[4852]: I0129 10:42:24.960798 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:24Z","lastTransitionTime":"2026-01-29T10:42:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.063009 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.063044 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.063055 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.063069 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.063080 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.165786 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.165984 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.165998 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.166013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.166024 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.268464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.268535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.268554 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.268620 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.268645 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.372156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.372232 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.372257 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.372287 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.372311 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.462771 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.462915 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:25 crc kubenswrapper[4852]: E0129 10:42:25.463139 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.463190 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.463277 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:25 crc kubenswrapper[4852]: E0129 10:42:25.463541 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:25 crc kubenswrapper[4852]: E0129 10:42:25.463719 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:25 crc kubenswrapper[4852]: E0129 10:42:25.463897 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.476100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.476203 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.476224 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.476293 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.476314 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.579496 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.579535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.579546 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.579562 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.579575 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.655376 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 07:49:32.520667233 +0000 UTC Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.682200 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.682270 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.682292 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.682323 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.682345 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.784746 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.784790 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.784805 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.784823 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.784833 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.887526 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.887561 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.887573 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.887615 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.887628 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.957617 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:25 crc kubenswrapper[4852]: E0129 10:42:25.957767 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:25 crc kubenswrapper[4852]: E0129 10:42:25.957828 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:42:33.957812738 +0000 UTC m=+51.175143872 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.990395 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.990435 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.990443 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.990458 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:25 crc kubenswrapper[4852]: I0129 10:42:25.990469 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:25Z","lastTransitionTime":"2026-01-29T10:42:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.093223 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.093258 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.093267 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.093282 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.093291 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.195786 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.195837 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.195853 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.195875 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.195890 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.299059 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.299107 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.299119 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.299136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.299149 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.403368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.403446 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.403465 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.403490 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.403515 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.506060 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.506152 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.506179 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.506209 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.506233 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.608920 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.608976 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.608993 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.609016 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.609033 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.656035 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 20:31:37.568015903 +0000 UTC Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.712005 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.712049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.712062 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.712080 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.712091 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.814269 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.814361 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.814377 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.814399 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.814415 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.916974 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.917043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.917065 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.917094 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:26 crc kubenswrapper[4852]: I0129 10:42:26.917117 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:26Z","lastTransitionTime":"2026-01-29T10:42:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.019641 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.019703 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.019719 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.019743 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.019757 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.122326 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.122360 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.122369 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.122382 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.122391 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.225009 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.225038 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.225050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.225064 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.225073 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.327602 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.327663 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.327673 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.327686 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.327694 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.430957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.430996 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.431006 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.431023 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.431032 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.462443 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.462512 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.462562 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:27 crc kubenswrapper[4852]: E0129 10:42:27.462673 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.462705 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:27 crc kubenswrapper[4852]: E0129 10:42:27.462802 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:27 crc kubenswrapper[4852]: E0129 10:42:27.462880 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:27 crc kubenswrapper[4852]: E0129 10:42:27.462983 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.533835 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.533871 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.533881 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.533897 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.533908 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.636700 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.636761 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.636778 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.636802 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.636820 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.656266 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 09:45:02.523094499 +0000 UTC Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.739618 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.739671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.739688 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.739708 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.739723 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.842404 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.842438 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.842446 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.842459 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.842467 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.944858 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.944932 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.944948 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.944971 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:27 crc kubenswrapper[4852]: I0129 10:42:27.944984 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:27Z","lastTransitionTime":"2026-01-29T10:42:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.009354 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.024870 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126b
d791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.043521 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.048111 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.048145 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.048156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.048173 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.048185 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.057150 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.074245 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.086548 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.099414 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.112863 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.124558 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.140996 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.150715 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.150760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc 
kubenswrapper[4852]: I0129 10:42:28.150770 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.150785 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.150795 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.155142 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.167001 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.178772 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.190339 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.199836 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.212021 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-
dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.232557 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6
efdf07e99bdd3e7127f4d065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:28Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.256281 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.256328 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.256372 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.256390 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.256406 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.359044 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.359095 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.359107 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.359124 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.359135 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.462290 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.462333 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.462344 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.462362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.462376 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.564624 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.565033 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.565122 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.565204 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.565267 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.656651 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 01:03:44.840008293 +0000 UTC Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.667983 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.668038 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.668050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.668068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.668082 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.770049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.770085 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.770095 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.770110 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.770120 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.873489 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.873546 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.873555 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.873597 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.873612 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.975902 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.975955 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.975965 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.975979 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:28 crc kubenswrapper[4852]: I0129 10:42:28.975988 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:28Z","lastTransitionTime":"2026-01-29T10:42:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.077972 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.078004 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.078013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.078028 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.078037 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.181192 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.181252 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.181265 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.181283 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.181295 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.283821 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.283859 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.283868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.283881 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.283893 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.386709 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.386755 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.386768 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.386787 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.386799 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.462941 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.462994 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.463061 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:29 crc kubenswrapper[4852]: E0129 10:42:29.463202 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.463221 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:29 crc kubenswrapper[4852]: E0129 10:42:29.463382 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:29 crc kubenswrapper[4852]: E0129 10:42:29.463463 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:29 crc kubenswrapper[4852]: E0129 10:42:29.463507 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.488622 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.488656 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.488667 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.488680 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.488692 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.591944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.591972 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.591980 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.591993 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.592001 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.656858 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 07:16:03.744963778 +0000 UTC Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.693976 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.694161 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.694226 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.694296 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.694357 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.798192 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.798238 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.798252 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.798270 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.798286 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.900696 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.900959 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.901067 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.901156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:29 crc kubenswrapper[4852]: I0129 10:42:29.901227 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:29Z","lastTransitionTime":"2026-01-29T10:42:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.003996 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.004034 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.004043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.004060 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.004069 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.089086 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.089747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.089784 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.089806 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.089825 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: E0129 10:42:30.103158 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:30Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.106763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.106803 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.106822 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.106844 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.106861 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: E0129 10:42:30.124256 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:30Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.127775 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.127823 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.127839 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.127859 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.127873 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: E0129 10:42:30.142432 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:30Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.146068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.146137 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.146147 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.146160 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.146169 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: E0129 10:42:30.159540 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:30Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.164307 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.164348 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.164358 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.164372 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.164381 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: E0129 10:42:30.176521 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:30Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:30 crc kubenswrapper[4852]: E0129 10:42:30.176651 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.178176 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.178209 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.178217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.178232 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.178242 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.281056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.281100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.281112 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.281130 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.281143 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.383926 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.384253 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.384449 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.384742 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.384960 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.488698 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.488737 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.488751 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.488770 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.488781 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.592283 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.592613 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.592759 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.592967 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.593142 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.657196 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 03:13:24.995161949 +0000 UTC Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.696092 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.696349 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.696506 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.696625 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.696737 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.799730 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.800011 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.800135 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.800247 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.800350 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.902944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.902977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.902985 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.903000 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:30 crc kubenswrapper[4852]: I0129 10:42:30.903010 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:30Z","lastTransitionTime":"2026-01-29T10:42:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.006318 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.006672 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.006819 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.006943 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.007045 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.111054 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.111090 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.111102 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.111119 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.111130 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.214071 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.214130 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.214157 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.214184 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.214202 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.317223 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.317264 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.317275 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.317292 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.317301 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.419888 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.420331 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.420433 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.420522 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.420643 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.463625 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.463667 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.463790 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:31 crc kubenswrapper[4852]: E0129 10:42:31.463984 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.464034 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:31 crc kubenswrapper[4852]: E0129 10:42:31.464186 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:31 crc kubenswrapper[4852]: E0129 10:42:31.464417 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:31 crc kubenswrapper[4852]: E0129 10:42:31.464485 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.522535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.522613 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.522630 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.522653 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.522671 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.625118 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.625189 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.625210 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.625240 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.625258 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.657949 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 12:59:01.091164198 +0000 UTC Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.727457 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.727512 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.727524 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.727540 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.727551 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.830112 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.830254 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.830286 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.830325 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.830348 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.907377 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.916113 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.920854 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",
\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:31Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.933183 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.933220 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.933228 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.933240 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.933249 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:31Z","lastTransitionTime":"2026-01-29T10:42:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.940946 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:31Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.955076 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:31Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.968368 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:31Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:31 crc kubenswrapper[4852]: I0129 10:42:31.982343 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"contain
erID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' 
detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:31Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:31 crc 
kubenswrapper[4852]: I0129 10:42:31.992674 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:31Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.004359 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.015705 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.028099 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.036042 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.036086 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.036098 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.036117 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.036128 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.038217 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.048869 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.062652 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.074167 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.085657 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.093932 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.105050 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.138081 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.138125 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.138137 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.138154 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.138166 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.240685 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.240722 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.240733 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.240748 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.240759 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.342517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.342554 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.342624 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.342639 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.342647 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.445492 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.446414 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.446597 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.446846 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.447021 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.463716 4852 scope.go:117] "RemoveContainer" containerID="cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.549212 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.549517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.549535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.549556 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.549570 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.652896 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.652937 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.652949 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.652966 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.652975 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.658376 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 18:56:49.227065546 +0000 UTC Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.755448 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.755514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.755526 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.755563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.755576 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.857091 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.857147 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.857164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.857187 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.857201 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.862511 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/1.log" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.864897 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.886545 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.899175 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.908523 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.919417 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.930508 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.944866 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.956299 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.960034 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.960064 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.960076 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.960090 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.960101 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:32Z","lastTransitionTime":"2026-01-29T10:42:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.976920 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:32 crc kubenswrapper[4852]: I0129 10:42:32.989690 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.000782 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:32Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.026404 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.044537 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.061335 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.062635 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.062675 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.062684 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.062699 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.062708 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.077184 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.088442 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.099417 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.109658 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.164605 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.164656 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.164669 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.164686 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.164697 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.275376 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.275409 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.275419 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.275438 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.275454 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.377154 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.377197 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.377206 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.377221 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.377232 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.462646 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.462699 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.462734 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.462895 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.463100 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.463260 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.463375 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.463454 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.479747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.479806 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.479822 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.479842 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.479854 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.488042 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.506480 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.519180 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.532934 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.547485 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.563145 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.579239 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.582160 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.582341 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.582476 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.582606 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.582736 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.592637 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.607191 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.618840 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.632332 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.645025 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.654906 4852 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.658614 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 20:18:05.373933876 +0000 UTC Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.665437 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.677317 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.684902 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.684956 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.684967 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.684986 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.684997 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.693788 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.709765 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 
2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.787948 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.788014 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.788032 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.788058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.788075 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.868991 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/2.log" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.869967 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/1.log" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.872775 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c" exitCode=1 Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.872811 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.872845 4852 scope.go:117] "RemoveContainer" containerID="cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.873631 4852 scope.go:117] "RemoveContainer" containerID="4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c" Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.873780 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.890416 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.890659 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.890732 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.890805 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.890903 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.894929 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.908455 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.922106 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.933092 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.950058 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.964026 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.975818 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.981710 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.981904 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:33 crc kubenswrapper[4852]: E0129 10:42:33.982009 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:42:49.981988623 +0000 UTC m=+67.199319757 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.988532 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:33Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.993421 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.993471 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.993480 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.993502 4852 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:33 crc kubenswrapper[4852]: I0129 10:42:33.993514 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:33Z","lastTransitionTime":"2026-01-29T10:42:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.002314 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.024351 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e72
41b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd6a88d4206798d642b56278945042eec13a49f6efdf07e99bdd3e7127f4d065\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:19Z\\\",\\\"message\\\":\\\"rotocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 10:42:19.809402 6395 ovnkube.go:599] Stopped ovnkube\\\\nI0129 10:42:19.809447 6395 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:19.809516 6395 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for 
removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-ac
cess-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.039226 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.051810 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.064490 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.078706 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.093484 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.096096 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.096260 
4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.096337 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.096413 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.096484 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.103882 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.116231 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:34Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.198986 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.199508 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.199572 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.199674 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.199738 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.301400 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.301461 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.301476 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.301495 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.301507 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.403881 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.404222 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.404445 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.404678 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.404848 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.507296 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.507342 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.507357 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.507376 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.507388 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.609948 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.609987 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.609998 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.610015 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.610026 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.659645 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 13:51:38.488707612 +0000 UTC Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.712180 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.712457 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.712569 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.712756 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.712914 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.815778 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.816170 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.816403 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.816658 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.816920 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.880032 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/2.log" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.920643 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.920774 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.920795 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.920824 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:34 crc kubenswrapper[4852]: I0129 10:42:34.920859 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:34Z","lastTransitionTime":"2026-01-29T10:42:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.023937 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.024005 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.024023 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.024050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.024071 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.126877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.126921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.126935 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.126954 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.126966 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.229610 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.229654 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.229671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.229697 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.229725 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.295468 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.295716 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.295766 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 10:43:07.295729509 +0000 UTC m=+84.513060683 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.295828 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.295922 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:43:07.295891433 +0000 UTC m=+84.513222597 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.333001 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.333045 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.333057 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.333074 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.333087 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.396908 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.396952 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.396977 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397089 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397104 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397114 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397160 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:43:07.397148752 +0000 UTC m=+84.614479886 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397158 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397188 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397235 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397261 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:43:07.397234874 +0000 UTC m=+84.614566048 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397267 4852 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.397354 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:43:07.397325336 +0000 UTC m=+84.614656520 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.435899 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.435988 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.436015 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.436050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.436074 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.462718 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.462856 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.463119 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.463209 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.463686 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.463792 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.463972 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:35 crc kubenswrapper[4852]: E0129 10:42:35.464061 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.539217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.539304 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.539330 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.539364 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.539389 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.641999 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.642028 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.642036 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.642049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.642058 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.659847 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 08:29:34.81446936 +0000 UTC Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.744974 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.745008 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.745024 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.745041 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.745053 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.847805 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.847856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.847869 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.847886 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.847897 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.950347 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.950435 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.950459 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.950486 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:35 crc kubenswrapper[4852]: I0129 10:42:35.950508 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:35Z","lastTransitionTime":"2026-01-29T10:42:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.052884 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.052949 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.052961 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.052980 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.053003 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.155807 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.155857 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.155867 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.155885 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.155897 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.258417 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.258469 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.258481 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.258499 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.258511 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.360938 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.360969 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.360977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.360992 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.361001 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.464251 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.464323 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.464345 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.464373 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.464395 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.566691 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.566745 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.566759 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.566774 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.566784 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.660486 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 00:52:37.563280273 +0000 UTC Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.669106 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.669181 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.669191 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.669206 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.669216 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.772083 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.772125 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.772134 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.772147 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.772156 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.874638 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.874684 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.874698 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.874717 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.874728 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.976990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.977027 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.977038 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.977056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:36 crc kubenswrapper[4852]: I0129 10:42:36.977067 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:36Z","lastTransitionTime":"2026-01-29T10:42:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.079544 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.079622 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.079630 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.079645 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.079681 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.182686 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.182727 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.182740 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.182755 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.182764 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.285412 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.285469 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.285480 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.285496 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.285509 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.388162 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.388205 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.388217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.388233 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.388246 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.463556 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.463643 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.463695 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.463555 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:37 crc kubenswrapper[4852]: E0129 10:42:37.463817 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:37 crc kubenswrapper[4852]: E0129 10:42:37.463974 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:37 crc kubenswrapper[4852]: E0129 10:42:37.464241 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:37 crc kubenswrapper[4852]: E0129 10:42:37.464302 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.491033 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.491081 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.491093 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.491108 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.491119 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.594197 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.594531 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.594543 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.594559 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.594571 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.661233 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 18:14:30.063384478 +0000 UTC Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.696995 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.697030 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.697041 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.697057 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.697068 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.799070 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.799137 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.799159 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.799185 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.799204 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.901709 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.901749 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.901760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.901776 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:37 crc kubenswrapper[4852]: I0129 10:42:37.901788 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:37Z","lastTransitionTime":"2026-01-29T10:42:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.004025 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.004068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.004079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.004093 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.004103 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.106658 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.106691 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.106699 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.106712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.106720 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.211042 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.211153 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.211182 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.211217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.211243 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.314700 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.314747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.314760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.314782 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.314796 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.418361 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.418422 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.418438 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.418463 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.418480 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.521528 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.521909 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.522071 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.522220 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.522387 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.625277 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.625612 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.625725 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.625854 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.625965 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.661960 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 22:35:26.642069489 +0000 UTC Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.729633 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.729711 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.729722 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.729756 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.729767 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.832207 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.832238 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.832247 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.832262 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.832276 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.934861 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.934907 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.934917 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.934933 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:38 crc kubenswrapper[4852]: I0129 10:42:38.934944 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:38Z","lastTransitionTime":"2026-01-29T10:42:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.036443 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.036470 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.036480 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.036493 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.036503 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.138927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.138978 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.138991 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.139010 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.139022 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.242297 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.242356 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.242374 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.242397 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.242416 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.347779 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.347838 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.347857 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.347898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.347922 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.451045 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.451100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.451115 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.451135 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.451148 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.462852 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.462968 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.463008 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.463153 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:39 crc kubenswrapper[4852]: E0129 10:42:39.463427 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:39 crc kubenswrapper[4852]: E0129 10:42:39.463540 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:39 crc kubenswrapper[4852]: E0129 10:42:39.463725 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:39 crc kubenswrapper[4852]: E0129 10:42:39.463945 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.553724 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.554164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.554391 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.554692 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.554911 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.657992 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.658071 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.658094 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.658124 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.658147 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.663296 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 04:21:48.739926726 +0000 UTC Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.761395 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.761462 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.761484 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.761512 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.761533 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.864439 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.864491 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.864514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.864535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.864550 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.967461 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.967533 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.967551 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.967575 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:39 crc kubenswrapper[4852]: I0129 10:42:39.967634 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:39Z","lastTransitionTime":"2026-01-29T10:42:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.070712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.070928 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.071014 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.071080 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.071135 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.174368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.174675 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.174781 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.174876 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.174955 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.277997 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.278049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.278060 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.278077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.278088 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.380857 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.380917 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.380934 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.380959 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.380980 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.388192 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.388243 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.388253 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.388271 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.388282 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: E0129 10:42:40.402243 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:40Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.407823 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.407934 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.408005 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.408095 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.408177 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: E0129 10:42:40.430433 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:40Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.434796 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.435116 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.435360 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.435714 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.436032 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: E0129 10:42:40.452434 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:40Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.457500 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.457551 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.457564 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.457606 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.457623 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: E0129 10:42:40.469803 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:40Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.473550 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.473600 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.473612 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.473627 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.473639 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: E0129 10:42:40.484201 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:40Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:40 crc kubenswrapper[4852]: E0129 10:42:40.484688 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.486422 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.486443 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.486451 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.486464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.486472 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.589162 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.589218 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.589229 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.589256 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.589271 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.663974 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 12:38:09.611119364 +0000 UTC Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.695445 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.695784 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.695794 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.695816 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.695828 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.799346 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.799831 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.799907 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.799990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.800068 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.902937 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.902995 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.903008 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.903028 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:40 crc kubenswrapper[4852]: I0129 10:42:40.903042 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:40Z","lastTransitionTime":"2026-01-29T10:42:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.006499 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.006543 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.006555 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.006569 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.006578 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.110387 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.110461 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.110488 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.110520 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.110545 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.213118 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.213156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.213174 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.213192 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.213203 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.315890 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.315918 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.315927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.315957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.315965 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.418189 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.418240 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.418253 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.418271 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.418284 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.463004 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.463078 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.463017 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:41 crc kubenswrapper[4852]: E0129 10:42:41.463137 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.463092 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:41 crc kubenswrapper[4852]: E0129 10:42:41.463253 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:41 crc kubenswrapper[4852]: E0129 10:42:41.463349 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:41 crc kubenswrapper[4852]: E0129 10:42:41.463447 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.520779 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.520814 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.520824 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.520837 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.520845 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.623641 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.623696 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.623709 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.623727 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.623736 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.664491 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 11:20:05.270028049 +0000 UTC Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.726424 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.726739 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.726841 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.726926 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.726989 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.784704 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.785495 4852 scope.go:117] "RemoveContainer" containerID="4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c" Jan 29 10:42:41 crc kubenswrapper[4852]: E0129 10:42:41.785700 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.801856 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.815732 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.830898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.830959 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.830971 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.830990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.831003 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.837546 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.854042 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.871227 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.889087 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.906656 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.923842 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.932975 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.933415 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.933536 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.933735 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.933930 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:41Z","lastTransitionTime":"2026-01-29T10:42:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.948868 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.963413 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.980849 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:41 crc kubenswrapper[4852]: I0129 10:42:41.999779 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:41Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.011495 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:42Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.025442 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:42Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.036018 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.036066 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.036081 4852 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.036103 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.036118 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.041918 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:42Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.054137 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:42Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.071185 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:42Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.139051 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.139090 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.139098 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.139113 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.139121 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.242022 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.242062 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.242070 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.242083 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.242093 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.344969 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.345027 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.345036 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.345050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.345059 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.448289 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.448344 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.448359 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.448381 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.448394 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.551000 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.551050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.551062 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.551079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.551095 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.654397 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.654435 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.654449 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.654467 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.654479 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.665288 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 07:30:09.758991477 +0000 UTC Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.758123 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.758169 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.758181 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.758196 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.758207 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.863528 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.863629 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.863654 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.863684 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.863702 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.967086 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.967153 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.967170 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.967193 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:42 crc kubenswrapper[4852]: I0129 10:42:42.967209 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:42Z","lastTransitionTime":"2026-01-29T10:42:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.070543 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.070615 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.070629 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.070659 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.070677 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.172795 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.172848 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.172865 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.172884 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.172898 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.275781 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.275870 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.275895 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.275921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.275940 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.379250 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.379311 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.379333 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.379362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.379384 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.462427 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:43 crc kubenswrapper[4852]: E0129 10:42:43.462562 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.462807 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.462867 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:43 crc kubenswrapper[4852]: E0129 10:42:43.462952 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:43 crc kubenswrapper[4852]: E0129 10:42:43.463022 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.463045 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:43 crc kubenswrapper[4852]: E0129 10:42:43.463125 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.480205 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.482476 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.482513 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.482523 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.482544 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.482555 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.495655 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.506318 4852 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.517435 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.527165 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.537505 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.551337 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.563260 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.572795 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.583319 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.585479 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.585503 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.585513 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.585529 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.585540 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.593049 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.603797 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.614642 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.628573 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.643109 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.657885 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.665735 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 08:45:09.104772649 +0000 UTC Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.674003 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"nam
e\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 
10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:43Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.687618 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.687645 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.687654 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.687667 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.687678 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.789561 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.789636 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.789650 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.789668 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.789679 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.891764 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.891800 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.891808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.891824 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.891835 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.994644 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.994690 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.994701 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.994719 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:43 crc kubenswrapper[4852]: I0129 10:42:43.994729 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:43Z","lastTransitionTime":"2026-01-29T10:42:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.097072 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.097496 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.097593 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.097681 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.097745 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.201289 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.201351 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.201368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.201393 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.201411 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.305050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.305117 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.305134 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.305164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.305312 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.408000 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.408058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.408068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.408083 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.408091 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.510277 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.510316 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.510325 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.510339 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.510349 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.612625 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.612737 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.612757 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.612779 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.612794 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.666173 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 01:44:53.243390297 +0000 UTC Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.716517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.716655 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.716682 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.716716 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.716739 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.819079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.819118 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.819126 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.819140 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.819149 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.920857 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.920909 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.920920 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.920935 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:44 crc kubenswrapper[4852]: I0129 10:42:44.920945 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:44Z","lastTransitionTime":"2026-01-29T10:42:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.022921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.022958 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.022999 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.023015 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.023025 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.125310 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.125398 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.125415 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.125438 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.125454 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.228362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.228408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.228417 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.228436 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.228448 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.331348 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.331389 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.331399 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.331441 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.331453 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.434076 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.434118 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.434128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.434142 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.434153 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.462991 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.463011 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.463007 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:45 crc kubenswrapper[4852]: E0129 10:42:45.463530 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:45 crc kubenswrapper[4852]: E0129 10:42:45.463174 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:45 crc kubenswrapper[4852]: E0129 10:42:45.463380 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.463029 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:45 crc kubenswrapper[4852]: E0129 10:42:45.463757 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.536712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.536775 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.536787 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.536806 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.536819 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.639261 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.639307 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.639317 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.639334 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.639344 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.666648 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 16:10:27.76092468 +0000 UTC Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.741610 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.741649 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.741659 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.741677 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.741689 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.843604 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.843646 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.843659 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.843677 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.843688 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.946108 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.946136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.946144 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.946158 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:45 crc kubenswrapper[4852]: I0129 10:42:45.946166 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:45Z","lastTransitionTime":"2026-01-29T10:42:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.049059 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.049293 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.049381 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.049483 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.049568 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.151622 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.151933 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.152030 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.152122 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.152199 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.254854 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.255056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.255151 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.255236 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.255341 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.360498 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.360784 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.360902 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.360997 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.361066 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.464635 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.464692 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.464707 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.464727 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.464742 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.567203 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.567234 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.567242 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.567254 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.567262 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.667114 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 00:15:27.518455228 +0000 UTC Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.669093 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.669138 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.669152 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.669169 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.669186 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.771922 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.772212 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.772280 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.772361 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.772419 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.874815 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.874842 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.874851 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.874864 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.874873 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.977267 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.977327 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.977344 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.977371 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:46 crc kubenswrapper[4852]: I0129 10:42:46.977388 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:46Z","lastTransitionTime":"2026-01-29T10:42:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.079616 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.079644 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.079654 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.079669 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.079679 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.181639 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.181676 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.181688 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.181703 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.181714 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.283895 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.283927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.283935 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.283949 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.283958 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.386693 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.386794 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.386812 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.386831 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.386844 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.463280 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.463282 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.463457 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:47 crc kubenswrapper[4852]: E0129 10:42:47.463606 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.463630 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:47 crc kubenswrapper[4852]: E0129 10:42:47.463733 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:47 crc kubenswrapper[4852]: E0129 10:42:47.463928 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:47 crc kubenswrapper[4852]: E0129 10:42:47.463990 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.488985 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.489223 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.489319 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.489385 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.489447 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.592246 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.592293 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.592310 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.592333 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.592352 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.667404 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 23:56:36.835941407 +0000 UTC Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.694285 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.694308 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.694315 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.694329 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.694337 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.797104 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.797446 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.797699 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.797877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.798020 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.901060 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.901333 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.901429 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.901845 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:47 crc kubenswrapper[4852]: I0129 10:42:47.901956 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:47Z","lastTransitionTime":"2026-01-29T10:42:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.005216 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.005681 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.005910 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.006189 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.006425 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.109898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.109943 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.109953 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.109972 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.109983 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.212390 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.212435 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.212447 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.212467 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.212481 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.315138 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.315174 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.315185 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.315208 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.315219 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.418122 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.418177 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.418190 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.418203 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.418214 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.521338 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.521561 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.521724 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.521816 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.521886 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.624805 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.624861 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.624876 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.624900 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.624915 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.667725 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 02:16:57.785831989 +0000 UTC Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.727833 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.727891 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.727908 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.727930 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.727959 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.831155 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.831194 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.831204 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.831221 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.831233 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.933930 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.934511 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.934687 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.934808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:48 crc kubenswrapper[4852]: I0129 10:42:48.934903 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:48Z","lastTransitionTime":"2026-01-29T10:42:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.038040 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.038094 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.038104 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.038121 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.038133 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.140214 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.140246 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.140254 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.140268 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.140279 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.242555 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.242620 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.242632 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.242649 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.242661 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.345015 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.345056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.345069 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.345084 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.345097 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.449458 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.449494 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.449502 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.449517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.449526 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.462979 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:49 crc kubenswrapper[4852]: E0129 10:42:49.463111 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.463127 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.463173 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.462987 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:49 crc kubenswrapper[4852]: E0129 10:42:49.463345 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:49 crc kubenswrapper[4852]: E0129 10:42:49.463546 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:49 crc kubenswrapper[4852]: E0129 10:42:49.463706 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.552127 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.552162 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.552174 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.552191 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.552206 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.655632 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.655674 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.655682 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.655701 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.655711 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.668677 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 03:01:13.431738751 +0000 UTC Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.757818 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.758219 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.758316 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.758400 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.758460 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.860682 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.860719 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.860728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.860743 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.860752 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.962682 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.962716 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.962725 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.962740 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:49 crc kubenswrapper[4852]: I0129 10:42:49.962750 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:49Z","lastTransitionTime":"2026-01-29T10:42:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.060071 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.060190 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.060254 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:43:22.060234515 +0000 UTC m=+99.277565649 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.065218 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.065242 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.065250 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.065263 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.065271 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.167445 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.167473 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.167484 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.167499 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.167510 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.269650 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.269718 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.269732 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.269754 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.269770 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.372034 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.372068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.372077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.372092 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.372103 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.473808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.474066 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.474125 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.474195 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.474253 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.528998 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.529029 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.529037 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.529050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.529059 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.540401 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:50Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.543763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.543816 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.543834 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.543853 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.543867 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.558981 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:50Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.562400 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.562466 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.562476 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.562491 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.562504 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.575278 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:50Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.579109 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.579825 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.579840 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.579861 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.579875 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.595081 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:50Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.598565 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.598630 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.598641 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.598656 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.598665 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.612370 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:50Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:50 crc kubenswrapper[4852]: E0129 10:42:50.612543 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.614042 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.614083 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.614096 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.614115 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.614128 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.668798 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 15:48:24.368830851 +0000 UTC Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.716669 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.716727 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.716738 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.716757 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.716769 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.819184 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.819229 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.819240 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.819257 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.819268 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.922019 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.922080 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.922092 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.922110 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:50 crc kubenswrapper[4852]: I0129 10:42:50.922123 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:50Z","lastTransitionTime":"2026-01-29T10:42:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.024653 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.024702 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.024716 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.024734 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.024746 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.126909 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.126955 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.126990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.127014 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.127028 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.230493 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.230539 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.230550 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.230570 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.230597 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.333825 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.333863 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.333873 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.333887 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.333896 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.436797 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.436862 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.436874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.436889 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.436901 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.463311 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.463393 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.463454 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:51 crc kubenswrapper[4852]: E0129 10:42:51.463447 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:51 crc kubenswrapper[4852]: E0129 10:42:51.463510 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.463577 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:51 crc kubenswrapper[4852]: E0129 10:42:51.463611 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:51 crc kubenswrapper[4852]: E0129 10:42:51.463775 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.538766 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.538806 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.538814 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.538828 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.538839 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.641085 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.641128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.641140 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.641158 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.641170 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.669962 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 04:54:53.243186143 +0000 UTC Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.743451 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.743503 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.743516 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.743534 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.743545 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.846315 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.846357 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.846368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.846384 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.846394 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.948973 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.949007 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.949019 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.949035 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:51 crc kubenswrapper[4852]: I0129 10:42:51.949046 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:51Z","lastTransitionTime":"2026-01-29T10:42:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.050878 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.050921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.050929 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.050945 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.050954 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.153198 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.153231 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.153241 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.153257 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.153268 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.255446 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.255499 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.255508 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.255521 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.255529 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.357230 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.357268 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.357280 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.357296 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.357307 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.459797 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.459853 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.459872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.459894 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.459911 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.475666 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.563455 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.563514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.563529 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.563555 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.563572 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.666255 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.666308 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.666324 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.666345 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.666362 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.670715 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 15:48:12.438312489 +0000 UTC Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.769021 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.769077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.769097 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.769121 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.769139 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.871668 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.871725 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.871736 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.871758 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.871771 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.975169 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.975228 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.975245 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.975272 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:52 crc kubenswrapper[4852]: I0129 10:42:52.975290 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:52Z","lastTransitionTime":"2026-01-29T10:42:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.077440 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.077474 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.077482 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.077499 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.077508 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.179872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.179913 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.179924 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.179941 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.179952 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.282417 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.282456 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.282467 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.282483 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.282493 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.384693 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.384728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.384736 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.384750 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.384759 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.462757 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.462856 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.462757 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:53 crc kubenswrapper[4852]: E0129 10:42:53.462892 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.463004 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:53 crc kubenswrapper[4852]: E0129 10:42:53.463006 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:53 crc kubenswrapper[4852]: E0129 10:42:53.463042 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:53 crc kubenswrapper[4852]: E0129 10:42:53.463090 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.489874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.489901 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.489908 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.489921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.489929 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.493658 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.530356 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping 
watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.545196 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.556843 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.569383 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.582785 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.592291 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.592334 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.592349 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.592368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.592381 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.597327 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.607506 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.620764 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.631386 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.642760 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.653124 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.664981 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.671257 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 13:20:27.714385433 +0000 UTC Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.679014 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bi
n\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd016
30618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.694320 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.694356 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.694368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.694384 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.694394 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.695393 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.708353 4852 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.718888 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.727603 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:53Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.796302 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.796657 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.796671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.796688 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.796699 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.899310 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.899347 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.899358 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.899373 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:53 crc kubenswrapper[4852]: I0129 10:42:53.899383 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:53Z","lastTransitionTime":"2026-01-29T10:42:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.002352 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.002396 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.002408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.002426 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.002437 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.104787 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.104825 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.104836 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.104852 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.104862 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.207699 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.207760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.207782 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.207812 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.207835 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.310248 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.310312 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.310324 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.310341 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.310352 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.412675 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.412719 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.412732 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.412748 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.412761 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.515137 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.515184 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.515194 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.515208 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.515217 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.618004 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.618056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.618068 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.618084 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.618096 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.672073 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 09:18:25.308282408 +0000 UTC Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.720646 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.720698 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.720710 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.720728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.720739 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.823342 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.823384 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.823396 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.823414 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.823427 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.925811 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.925849 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.925858 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.925873 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.925883 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:54Z","lastTransitionTime":"2026-01-29T10:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.946829 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/0.log" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.946880 4852 generic.go:334] "Generic (PLEG): container finished" podID="80701ea9-a994-4a9f-8291-e3e40decfeda" containerID="95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460" exitCode=1 Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.946913 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerDied","Data":"95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460"} Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.947337 4852 scope.go:117] "RemoveContainer" containerID="95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.959406 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:54Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.967912 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:54Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.981103 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:54Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:54 crc kubenswrapper[4852]: I0129 10:42:54.991103 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:54Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.012555 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.023857 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.028039 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.028072 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.028084 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.028101 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.028112 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.036448 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.049142 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to /host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.066447 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.078151 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.088736 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.100365 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.115175 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.125991 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.130357 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.130516 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.130619 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.130702 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.130759 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.137168 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.148781 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.160987 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.169843 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.232533 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.232565 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.232574 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.232602 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.232612 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.335309 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.335340 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.335348 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.335362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.335370 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.438248 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.438489 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.438598 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.438694 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.438811 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.462611 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.462668 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.462710 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:55 crc kubenswrapper[4852]: E0129 10:42:55.462767 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:55 crc kubenswrapper[4852]: E0129 10:42:55.462863 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:55 crc kubenswrapper[4852]: E0129 10:42:55.462995 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.463073 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:55 crc kubenswrapper[4852]: E0129 10:42:55.463195 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.541303 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.541350 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.541362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.541382 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.541393 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.643804 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.643855 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.643872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.643897 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.643914 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.672370 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 13:15:32.000553031 +0000 UTC Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.746047 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.746254 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.746340 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.746414 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.746475 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.848754 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.848792 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.848805 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.848821 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.848832 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951014 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/0.log" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951108 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951325 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951342 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951365 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951383 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:55Z","lastTransitionTime":"2026-01-29T10:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.951278 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerStarted","Data":"ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1"} Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.965502 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.977645 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.987268 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:55 crc kubenswrapper[4852]: I0129 10:42:55.996856 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:55Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.005914 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.018061 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.026294 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.044864 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e72
41b587b24bec351007779a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.053840 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.054130 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.054214 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.054302 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.054389 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.056977 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to /host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.068527 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.081967 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.093013 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.105262 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.118126 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.129404 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.140829 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.150972 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.156685 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.156855 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.156921 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.157008 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.157070 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.163532 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:56Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.260014 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.260058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.260066 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.260081 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.260092 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.362462 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.362807 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.362941 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.363048 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.363128 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.464774 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.464806 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.464817 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.464830 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.464842 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.567368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.567403 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.567413 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.567428 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.567438 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.669808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.669845 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.669856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.669871 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.669881 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.673216 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 11:34:47.608574676 +0000 UTC Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.772880 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.772923 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.772932 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.772946 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.772955 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.875217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.875256 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.875267 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.875282 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.875293 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.977392 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.977420 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.977428 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.977441 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:56 crc kubenswrapper[4852]: I0129 10:42:56.977450 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:56Z","lastTransitionTime":"2026-01-29T10:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.079744 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.079779 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.079791 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.079806 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.079815 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.182179 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.182222 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.182235 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.182251 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.182262 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.285062 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.285107 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.285118 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.285136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.285148 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.388672 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.388732 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.388745 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.388765 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.388784 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.463493 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.463558 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.463507 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:57 crc kubenswrapper[4852]: E0129 10:42:57.463654 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.463567 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:57 crc kubenswrapper[4852]: E0129 10:42:57.463798 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:57 crc kubenswrapper[4852]: E0129 10:42:57.463860 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:57 crc kubenswrapper[4852]: E0129 10:42:57.463897 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.464458 4852 scope.go:117] "RemoveContainer" containerID="4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.491854 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.491887 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.491895 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.491908 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.491917 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.593535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.593860 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.593870 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.593908 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.593917 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.674275 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 06:54:05.643497751 +0000 UTC Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.696509 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.696539 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.696550 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.696566 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.696597 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.799471 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.799525 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.799535 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.799548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.799557 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.902318 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.902352 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.902362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.902376 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.902387 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:57Z","lastTransitionTime":"2026-01-29T10:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.959420 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/2.log" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.961571 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.963705 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:42:57 crc kubenswrapper[4852]: I0129 10:42:57.981186 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to /host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:57Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.003134 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.004401 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.004444 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.004459 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.004481 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.004497 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.025231 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.035387 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.047247 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.058829 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.068339 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.079986 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.090237 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.102334 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.106380 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.106411 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.106421 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.106436 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.106446 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.112744 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.123964 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.136571 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.145768 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.157361 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.173125 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.187072 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.196268 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.208774 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.208814 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.208822 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.208837 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.208849 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.310824 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.310866 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.310877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.310895 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.310905 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.413553 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.413606 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.413615 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.413629 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.413638 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.515554 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.515636 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.515649 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.515668 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.515682 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.617942 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.617970 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.617978 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.617990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.617998 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.675298 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 20:10:00.146251162 +0000 UTC Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.720523 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.720550 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.720557 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.720570 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.720594 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.823335 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.823371 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.823382 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.823397 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.823410 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.925540 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.925567 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.925575 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.925599 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.925608 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:58Z","lastTransitionTime":"2026-01-29T10:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.967293 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/3.log" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.968162 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/2.log" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.970879 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" exitCode=1 Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.970914 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.970943 4852 scope.go:117] "RemoveContainer" containerID="4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.971720 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:42:58 crc kubenswrapper[4852]: E0129 10:42:58.971903 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:42:58 crc kubenswrapper[4852]: I0129 10:42:58.988781 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:58Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.004994 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.021313 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 
10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.028431 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.028520 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.028547 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.028629 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.028655 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.037150 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.054940 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.071720 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.082898 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.092974 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.101345 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.110725 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.121689 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.132901 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.132974 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.132986 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.133004 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.133015 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.136542 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.153964 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 
2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.168174 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.185627 4852 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.202406 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.219133 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to /host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.236036 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.236092 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.236103 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.236123 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.236138 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.243137 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c6cdb48ded83052b0bd709a6c53e278e9af6e7241b587b24bec351007779a6c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:33Z\\\",\\\"message\\\":\\\"/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 10:42:33.350818 6546 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350941 6546 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.350977 6546 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351165 6546 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 10:42:33.351545 6546 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 10:42:33.351560 6546 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 10:42:33.351591 6546 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 10:42:33.351606 6546 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 10:42:33.351621 6546 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 10:42:33.351638 6546 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 10:42:33.351645 6546 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 10:42:33.351625 6546 factory.go:656] Stopping watch factory\\\\nI0129 10:42:33.351709 6546 ovnkube.go:599] Stopped ovnkube\\\\nI0129 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:58Z\\\",\\\"message\\\":\\\"multus/network-metrics-daemon-bqdnv openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v openshift-ovn-kubernetes/ovnkube-node-22xhj openshift-image-registry/node-ca-r27t7 openshift-machine-config-operator/machine-config-daemon-zdz6d openshift-multus/multus-sd8vh]\\\\nI0129 10:42:58.362401 6941 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0129 10:42:58.362414 6941 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362422 6941 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362429 6941 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-sd8vh in node crc\\\\nI0129 10:42:58.362434 6941 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-sd8vh after 0 failed attempt(s)\\\\nI0129 10:42:58.362440 6941 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362454 6941 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:58.362515 6941 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.339624 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.339688 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.339704 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.339731 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.339751 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.443281 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.443749 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.443828 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.443911 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.443975 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.462446 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.462521 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.462575 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:42:59 crc kubenswrapper[4852]: E0129 10:42:59.462663 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.462687 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:42:59 crc kubenswrapper[4852]: E0129 10:42:59.462936 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:42:59 crc kubenswrapper[4852]: E0129 10:42:59.462894 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:42:59 crc kubenswrapper[4852]: E0129 10:42:59.462993 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.547892 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.547944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.547956 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.547977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.547997 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.651107 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.651168 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.651186 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.651211 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.651227 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.675845 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 21:55:02.469563372 +0000 UTC Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.754418 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.754463 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.754473 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.754488 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.754499 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.857659 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.857697 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.857712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.857728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.857740 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.961342 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.961373 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.961381 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.961393 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.961402 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:42:59Z","lastTransitionTime":"2026-01-29T10:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.974955 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/3.log" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.979713 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:42:59 crc kubenswrapper[4852]: E0129 10:42:59.979864 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:42:59 crc kubenswrapper[4852]: I0129 10:42:59.990125 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:42:59Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.001746 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.013975 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.024823 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.040261 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\
\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"fin
ishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067461
6e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.054014 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.066563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.066728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.066754 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.066786 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.066810 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.066958 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.085552 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to /host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.100821 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:58Z\\\",\\\"message\\\":\\\"multus/network-metrics-daemon-bqdnv openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v openshift-ovn-kubernetes/ovnkube-node-22xhj openshift-image-registry/node-ca-r27t7 openshift-machine-config-operator/machine-config-daemon-zdz6d openshift-multus/multus-sd8vh]\\\\nI0129 10:42:58.362401 6941 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0129 10:42:58.362414 6941 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362422 6941 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362429 6941 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-sd8vh in node crc\\\\nI0129 10:42:58.362434 6941 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-sd8vh after 0 failed attempt(s)\\\\nI0129 10:42:58.362440 6941 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362454 6941 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:58.362515 6941 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.111197 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.122817 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.134853 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.148040 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.165954 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.169758 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.169837 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.169851 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.169892 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.169905 4852 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.184034 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.204060 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.224888 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.235627 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.272357 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.272410 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.272424 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.272444 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.272460 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.375013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.375063 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.375079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.375100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.375115 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.477365 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.477424 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.477445 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.477464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.477478 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.581006 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.581062 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.581074 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.581094 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.581109 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.676059 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 05:56:24.881031132 +0000 UTC Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.683565 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.683757 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.683790 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.683841 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.683866 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.778875 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.778931 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.778955 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.778977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.778990 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: E0129 10:43:00.791881 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.795684 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.795729 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.795742 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.795763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.795776 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: E0129 10:43:00.809048 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.812869 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.812895 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.812904 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.812926 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.812938 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: E0129 10:43:00.824836 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.832238 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.832904 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.832933 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.832954 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.832971 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: E0129 10:43:00.844569 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.848141 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.848186 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.848198 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.848215 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.848616 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: E0129 10:43:00.858632 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:00Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:00 crc kubenswrapper[4852]: E0129 10:43:00.858734 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.860053 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.860085 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.860118 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.860131 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.860138 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.963706 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.963767 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.963780 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.964087 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:00 crc kubenswrapper[4852]: I0129 10:43:00.964119 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:00Z","lastTransitionTime":"2026-01-29T10:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.067530 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.067616 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.067635 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.067660 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.067677 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.170490 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.170558 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.170570 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.170608 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.170622 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.273463 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.273500 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.273515 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.273534 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.273548 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.376061 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.376136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.376144 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.376158 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.376169 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.462745 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.462827 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:01 crc kubenswrapper[4852]: E0129 10:43:01.462894 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.462911 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.462772 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:01 crc kubenswrapper[4852]: E0129 10:43:01.463020 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:01 crc kubenswrapper[4852]: E0129 10:43:01.463123 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:01 crc kubenswrapper[4852]: E0129 10:43:01.463216 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.478786 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.478815 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.478823 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.478836 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.478844 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.581031 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.581100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.581120 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.581149 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.581167 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.676978 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 07:49:09.335526771 +0000 UTC Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.683364 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.683408 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.683419 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.683436 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.683449 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.785328 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.785377 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.785388 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.785405 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.785417 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.887995 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.888031 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.888041 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.888058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.888070 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.991180 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.991217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.991226 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.991240 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:01 crc kubenswrapper[4852]: I0129 10:43:01.991250 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:01Z","lastTransitionTime":"2026-01-29T10:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.093942 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.093989 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.094033 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.094053 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.094064 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.196386 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.196434 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.196445 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.196461 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.196473 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.299011 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.299045 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.299056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.299072 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.299082 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.401460 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.401501 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.401508 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.401522 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.401531 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.503957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.503990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.503998 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.504013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.504022 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.609482 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.609532 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.609546 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.609563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.609575 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.677146 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 13:23:51.079845642 +0000 UTC Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.712275 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.712340 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.712355 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.712377 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.712390 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.815049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.815096 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.815107 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.815125 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.815141 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.918199 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.918255 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.918270 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.918290 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:02 crc kubenswrapper[4852]: I0129 10:43:02.918305 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:02Z","lastTransitionTime":"2026-01-29T10:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.020391 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.020426 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.020435 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.020448 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.020459 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.122497 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.122538 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.122548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.122563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.122575 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.225206 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.225248 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.225258 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.225274 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.225285 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.328734 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.328777 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.328794 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.328815 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.328831 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.431873 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.431916 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.431928 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.431944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.431957 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.462548 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.462628 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.462548 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:03 crc kubenswrapper[4852]: E0129 10:43:03.462727 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:03 crc kubenswrapper[4852]: E0129 10:43:03.462854 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:03 crc kubenswrapper[4852]: E0129 10:43:03.462964 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.463017 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:03 crc kubenswrapper[4852]: E0129 10:43:03.463147 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.476018 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.502079 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.519624 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 
crc kubenswrapper[4852]: I0129 10:43:03.534734 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.534807 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.534825 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.534853 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.534871 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.543224 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 
10:43:03.563845 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.578209 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.592812 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.608975 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to 
/host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.631161 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:58Z\\\",\\\"message\\\":\\\"multus/network-metrics-daemon-bqdnv openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v openshift-ovn-kubernetes/ovnkube-node-22xhj openshift-image-registry/node-ca-r27t7 openshift-machine-config-operator/machine-config-daemon-zdz6d openshift-multus/multus-sd8vh]\\\\nI0129 10:42:58.362401 6941 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0129 10:42:58.362414 6941 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362422 6941 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362429 6941 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-sd8vh in node crc\\\\nI0129 10:42:58.362434 6941 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-sd8vh after 0 failed attempt(s)\\\\nI0129 10:42:58.362440 6941 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362454 6941 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:58.362515 6941 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.636860 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.636882 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.636890 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.636912 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.636922 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.642373 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.651778 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.666650 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.677297 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 23:34:24.334627681 +0000 UTC Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.680473 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.690237 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.704517 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.717795 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.730843 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.739058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.739099 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.739111 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.739129 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.739143 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.741809 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:03Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.841786 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.841856 4852 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.841868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.841883 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.841895 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.944727 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.944765 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.944776 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.944791 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:03 crc kubenswrapper[4852]: I0129 10:43:03.944802 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:03Z","lastTransitionTime":"2026-01-29T10:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.047690 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.047736 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.047747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.047763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.047775 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.151681 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.151726 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.151737 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.151752 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.151764 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.254453 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.254484 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.254493 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.254507 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.254515 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.357029 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.357112 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.357128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.357148 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.357165 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.459615 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.459668 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.459680 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.459697 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.460139 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.562165 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.562196 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.562205 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.562217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.562225 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.664827 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.664863 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.664872 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.664887 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.664896 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.677935 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 06:31:03.17772448 +0000 UTC Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.768384 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.769278 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.769418 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.769556 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.769749 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.873249 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.873302 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.873317 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.873339 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.873354 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.975887 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.975948 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.975963 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.975984 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:04 crc kubenswrapper[4852]: I0129 10:43:04.976000 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:04Z","lastTransitionTime":"2026-01-29T10:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.079328 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.079718 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.079802 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.079890 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.079988 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.182883 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.183190 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.183276 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.183379 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.183467 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.285745 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.285799 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.285812 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.285836 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.285848 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.388551 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.388636 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.388655 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.388680 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.388697 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.462864 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.462898 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:05 crc kubenswrapper[4852]: E0129 10:43:05.463036 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.463116 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.463194 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:05 crc kubenswrapper[4852]: E0129 10:43:05.463223 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:05 crc kubenswrapper[4852]: E0129 10:43:05.463373 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:05 crc kubenswrapper[4852]: E0129 10:43:05.463454 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.491373 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.491424 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.491441 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.491464 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.491482 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.594453 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.594520 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.594542 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.594569 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.594630 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.678139 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 06:39:58.453067393 +0000 UTC Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.697506 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.697548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.697561 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.697607 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.697622 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.800213 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.800850 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.800888 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.800917 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.800940 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.903915 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.903944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.903954 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.903967 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:05 crc kubenswrapper[4852]: I0129 10:43:05.903976 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:05Z","lastTransitionTime":"2026-01-29T10:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.006371 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.006444 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.006468 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.006495 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.006512 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.108649 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.108686 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.108694 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.108707 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.108718 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.211063 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.211108 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.211119 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.211135 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.211147 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.313111 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.313164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.313175 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.313261 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.313276 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.416466 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.416530 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.416548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.416571 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.416613 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.520565 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.520637 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.520650 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.520666 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.520678 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.623948 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.624037 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.624049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.624079 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.624097 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.679146 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 21:58:45.989718041 +0000 UTC Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.726931 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.727001 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.727024 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.727058 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.727080 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.829983 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.830027 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.830041 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.830061 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.830077 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.932983 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.933030 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.933039 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.933054 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:06 crc kubenswrapper[4852]: I0129 10:43:06.933063 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:06Z","lastTransitionTime":"2026-01-29T10:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.035760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.035822 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.035835 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.035874 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.035887 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.138415 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.138462 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.138474 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.138495 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.138507 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.240919 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.240959 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.240981 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.241000 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.241014 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.340421 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.340556 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.340715 4852 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.340767 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.340750669 +0000 UTC m=+148.558081813 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.340788 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.3407753 +0000 UTC m=+148.558106434 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.342998 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.343069 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.343088 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.343110 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.343127 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.441320 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.441370 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.441418 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441541 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441560 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441572 4852 projected.go:194] Error preparing data for projected 
volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441606 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441623 4852 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441630 4852 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441714 4852 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441644 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.441628362 +0000 UTC m=+148.658959496 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441754 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.441738845 +0000 UTC m=+148.659069979 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.441772 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.441764276 +0000 UTC m=+148.659095410 (durationBeforeRetry 1m4s). 
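
The volume errors above are requeued rather than retried immediately: the UnmountVolume.TearDown for the hostpath-provisioner PVC fails because the kubevirt.io.hostpath-provisioner CSI driver has not yet re-registered with this kubelet, the MountVolume.SetUp calls fail because the referenced ConfigMaps/Secrets are not registered, and each failed operation is rescheduled with exponential backoff. "durationBeforeRetry 1m4s" means the next attempt is 64 s away, and "m=+148.55..." is the monotonic time since this kubelet process started. A small Go sketch of a doubling backoff with a cap; the initial delay and cap below are assumptions chosen only to reproduce the 1m4s seen here, not taken from kubelet source:

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the previous retry delay up to a cap, the pattern behind
// the durationBeforeRetry values in the log above.
func nextDelay(prev, max time.Duration) time.Duration {
	if prev == 0 {
		return 500 * time.Millisecond // assumed initial delay
	}
	if next := 2 * prev; next < max {
		return next
	}
	return max
}

func main() {
	var d time.Duration
	for i := 0; i < 10; i++ {
		d = nextDelay(d, 2*time.Minute+2*time.Second) // assumed cap
		fmt.Println(d)
	}
	// Prints 500ms, 1s, 2s, ... 32s, 1m4s, 2m2s, 2m2s: the eighth failure
	// lands on the 1m4s delay reported above.
}
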
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.445910 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.445938 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.445947 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.445960 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.445971 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.463377 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.463390 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.463458 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.463558 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.463632 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.463694 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.463732 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:07 crc kubenswrapper[4852]: E0129 10:43:07.463923 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.548140 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.548185 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.548195 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.548215 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.548227 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.651484 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.651558 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.651574 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.651616 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.651627 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.680087 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 20:12:45.927221775 +0000 UTC Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.754230 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.754256 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.754264 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.754277 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.754286 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.858025 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.858086 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.858100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.858117 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.858129 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.960070 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.960119 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.960128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.960141 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:07 crc kubenswrapper[4852]: I0129 10:43:07.960150 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:07Z","lastTransitionTime":"2026-01-29T10:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.063864 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.063901 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.063913 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.063928 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.063938 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.166912 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.166971 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.166990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.167013 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.167030 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.269676 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.269970 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.270069 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.270168 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.270247 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.373804 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.373943 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.373977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.374017 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.374058 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.478247 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.478312 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.478332 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.478357 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.478376 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.581465 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.581517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.581536 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.581561 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.581613 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.680959 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 06:21:17.628961323 +0000 UTC Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.685040 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.685113 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.685136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.685169 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.685192 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.788877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.788923 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.788933 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.788950 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.788960 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.892321 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.892368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.892380 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.892398 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.892412 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.995695 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.995741 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.995750 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.995770 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:08 crc kubenswrapper[4852]: I0129 10:43:08.995781 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:08Z","lastTransitionTime":"2026-01-29T10:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.098532 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.098621 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.098636 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.098652 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.098668 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.200711 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.200795 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.200813 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.200841 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.200859 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.303794 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.303871 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.303905 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.303937 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.303962 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.407097 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.407160 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.407172 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.407191 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.407204 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.462785 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:09 crc kubenswrapper[4852]: E0129 10:43:09.463701 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.462903 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:09 crc kubenswrapper[4852]: E0129 10:43:09.464162 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.462910 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.462983 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:09 crc kubenswrapper[4852]: E0129 10:43:09.464862 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:09 crc kubenswrapper[4852]: E0129 10:43:09.464731 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.510575 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.510840 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.510937 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.511028 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.511117 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.614233 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.614723 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.614845 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.614988 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.615078 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.681892 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 19:44:54.318374467 +0000 UTC Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.718696 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.719081 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.719249 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.719390 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.719627 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.822706 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.822781 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.822802 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.822828 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.822860 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.926148 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.927495 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.927782 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.928289 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:09 crc kubenswrapper[4852]: I0129 10:43:09.928859 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:09Z","lastTransitionTime":"2026-01-29T10:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.032443 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.032488 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.032499 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.032517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.032528 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.135760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.136242 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.136346 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.136445 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.136825 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.239070 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.239104 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.239116 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.239132 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.239144 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.344442 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.344486 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.344498 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.344518 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.344530 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.447247 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.447288 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.447298 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.447314 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.447326 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.550807 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.550846 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.550858 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.550877 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.550890 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.652917 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.653248 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.654180 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.654411 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.654628 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.684284 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 08:02:18.082973963 +0000 UTC Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.756738 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.756766 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.756788 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.756803 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.756812 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.859621 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.859653 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.859671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.859688 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.859698 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.962450 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.962495 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.962507 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.962522 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:10 crc kubenswrapper[4852]: I0129 10:43:10.962533 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:10Z","lastTransitionTime":"2026-01-29T10:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.065652 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.065728 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.065747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.065771 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.065789 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.169795 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.169863 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.169886 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.169914 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.169939 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.204132 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.204176 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.204191 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.204210 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.204225 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.222217 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:11Z is after 
2025-08-24T17:21:41Z" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.225934 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.225974 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.225985 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.226001 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.226011 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.242100 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:11Z is after 
2025-08-24T17:21:41Z" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.244866 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.244897 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.244908 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.244923 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.244935 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.258523 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:11Z is after 
2025-08-24T17:21:41Z" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.261637 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.261666 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.261683 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.261702 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.261714 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.272745 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:11Z is after 
2025-08-24T17:21:41Z" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.275309 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.275349 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.275379 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.275393 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.275401 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.288880 4852 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T10:43:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"6d167e52-c8b4-491a-b64e-fc81a677c102\\\",\\\"systemUUID\\\":\\\"6667fab7-c571-4f1d-8f14-ac3c8ed4cf48\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:11Z is after 
2025-08-24T17:21:41Z" Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.289030 4852 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.290243 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.290306 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.290322 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.290339 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.290352 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.394311 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.394370 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.394389 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.394419 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.394441 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.462627 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.462630 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.462789 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.462871 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.463009 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.463185 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.463388 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:11 crc kubenswrapper[4852]: E0129 10:43:11.463499 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.480712 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.497047 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.497080 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.497090 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.497106 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.497117 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.599529 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.599633 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.599653 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.599679 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.599723 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.684751 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 22:37:16.404931031 +0000 UTC Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.702010 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.702046 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.702057 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.702072 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.702082 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.804642 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.804702 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.804736 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.804776 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.804799 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.907622 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.907664 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.907676 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.907695 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:11 crc kubenswrapper[4852]: I0129 10:43:11.907706 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:11Z","lastTransitionTime":"2026-01-29T10:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.009526 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.009630 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.009644 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.009660 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.009673 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.112459 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.112492 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.112500 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.112514 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.112523 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.215036 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.215107 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.215124 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.215151 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.215168 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.316864 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.316900 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.316911 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.316926 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.316938 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.419252 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.419302 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.419312 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.419330 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.419340 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.521471 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.521531 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.521539 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.521553 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.521561 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.623721 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.623775 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.623784 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.623803 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.623816 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.685001 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 07:21:05.087312366 +0000 UTC Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.726480 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.726517 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.726528 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.726543 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.726554 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.829506 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.829558 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.829570 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.829611 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.829624 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.932378 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.932421 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.932433 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.932448 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:12 crc kubenswrapper[4852]: I0129 10:43:12.932457 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:12Z","lastTransitionTime":"2026-01-29T10:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.034669 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.034712 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.034722 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.034737 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.034746 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.137892 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.137950 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.137963 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.137987 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.138000 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.240821 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.240869 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.240880 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.240898 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.240911 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.343977 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.344022 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.344033 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.344049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.344060 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.446396 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.446491 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.446510 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.446569 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.446676 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.462828 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.462907 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.463046 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.463188 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:13 crc kubenswrapper[4852]: E0129 10:43:13.463078 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:13 crc kubenswrapper[4852]: E0129 10:43:13.462927 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:13 crc kubenswrapper[4852]: E0129 10:43:13.463357 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:13 crc kubenswrapper[4852]: E0129 10:43:13.463679 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.477040 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6grc8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"840b2302-f5bc-46f5-b645-9a759fe39d4d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d636f958755552c803bffa48ad711bbd8c129a50c27ff7bdc392b2bf351657d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kmq5z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6grc8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.492282 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.506809 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23a48459-954c-4e1a-bd79-bc6018bc255f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36702285f7be77a6a7b69a6ce5f09090435dd63f3b33cb6638c340599cecef54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvrzw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zdz6d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.527303 4852 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mclx7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874aced6-eac8-456a-8d96-f2ab970a5989\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5949c62eb3dc2f53706de0bcc0287d4ce98e50257b655ab24a5962d0e3cd593\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://410c96e7cf7b78504018008d469901f4bfae133eb0bf54ff4e7169cb6e249aaf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3eb6b6d5d43bf2eaf3b2b78fafe1b5abca9fe928399dfb5680f2fc9f34f66cd2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://092a4059e920e8fc7d49edf48eeb243077383f11a9002cb5df93d934c89c9f45\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ed8db75a58d4e0e8be105443f696224274f3775350250a6a7bee824365dabeb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7059fe9fbc1763b6a7ca725e6f800bd14ff78109ea8df875e8856fe8ce9c0140\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed73c41dd01630618f7c427aeb33ba542f1d5d7c45cb97e6fcb8da4a947900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-msh5n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mclx7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.541109 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"448d3a4c-f7bd-4dcd-bc31-829892860a86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://be554f6ab2c3403f5e16a0aa42766f7e46e660e22ad3509786ab2365d72d8333\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd1dc09b9c94dce57a1ce6e5e504b673ee2ab8a126f9815d00a5e3443f024bc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.549726 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.549808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.549836 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.549868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.549890 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.570713 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7a5a3920-201e-4874-a05a-80a81347fe81\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a73c288b8e1fc599e0550e3c16f1029aae50f00071b6bb415fbc0bfa23df8137\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41cc0f3c20e6e3238f7b2afa0ee16d23a5391238e63842a12e805c8bfc523322\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdebc5c401ca83413a02f142f849b56184aa03583efa1ee707e484e872c1a69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://27b8a4a075accd549db818f5a98fb089d16950dac2305da5f859a1bc96c7e10a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd35889adcdb4d420b097b551045c9e7be1e83c26468d051b0c65606855c79f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c2c6f545e9e367d20dc7633ffe4d770cfb5287d1fdf35058816d6f69062d7fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c2c6f545e9e367d20dc7633ffe4d770cfb5287d1fdf35058816d6f69062d7fa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd98b4d1ffd6f3deb2392c2541e5f789283166209ca43224ec050372f79e71e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd98b4d1ffd6f3deb2392c2541e5f789283166209ca43224ec050372f79e71e0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://172a5cacc2a164434527fb6826e2d424a279e4192c9df514179003a0fd7db1be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://172a5cacc2a164434527fb6826e2d424a279e4192c9df514179003a0fd7db1be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.586255 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://103ce0114ecfabfab2364da9faffe660254543afc56adabacc9280fae103b228\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.600043 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d6fdb4bfd7cf454ee5c0dad812c926d9399cc87915e3cf4572d17e047c1c9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.615478 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd8vh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80701ea9-a994-4a9f-8291-e3e40decfeda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:54Z\\\",\\\"message\\\":\\\"2026-01-29T10:42:08+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c\\\\n2026-01-29T10:42:08+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2588bf01-0d79-4450-b235-95b02822170c to /host/opt/cni/bin/\\\\n2026-01-29T10:42:09Z [verbose] multus-daemon started\\\\n2026-01-29T10:42:09Z [verbose] Readiness Indicator file check\\\\n2026-01-29T10:42:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjdv5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd8vh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.635799 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e44156c-fa1a-4edf-a317-e63b96f7aae4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T10:42:58Z\\\",\\\"message\\\":\\\"multus/network-metrics-daemon-bqdnv openshift-network-diagnostics/network-check-target-xd92c openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v openshift-ovn-kubernetes/ovnkube-node-22xhj openshift-image-registry/node-ca-r27t7 openshift-machine-config-operator/machine-config-daemon-zdz6d openshift-multus/multus-sd8vh]\\\\nI0129 10:42:58.362401 6941 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0129 10:42:58.362414 6941 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362422 6941 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362429 6941 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-sd8vh in node crc\\\\nI0129 10:42:58.362434 6941 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-sd8vh after 0 failed attempt(s)\\\\nI0129 10:42:58.362440 6941 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-sd8vh\\\\nI0129 10:42:58.362454 6941 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0129 10:42:58.362515 6941 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qsng4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-22xhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.652139 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90278d21ae41e18d1219a57464ff1c8e5131ed4705b3f4f229a8d734717114dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://af2029be7c4c1154396229d0fabffb2c6abc03e2e1105882e315a755f390095f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.652320 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.652346 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.652356 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.652371 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.652381 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.665686 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.676259 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31063684-b722-4141-a737-40f04b50f3ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://052a5116a72f7c2d2717583100f48ec919246a14b715b79fcecab9212aab0396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c77652099ba3eb74559a2f96c334ce0db4b417e5f636f78e09e96ff855bb38b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2x488\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-msg8v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 
10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.685793 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 10:53:31.974238091 +0000 UTC Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.689844 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"164c5c48-74e2-4528-aec4-71520c57d8ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e4f7070ad946f38abd87217e82d342317d1152d646c48fef51b733337d97f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c67dc5202497c02ba3dba3eef558f30f4455e0c5bc9e12eb7e5193ac5f2c08b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc8751f8e7d1fd54fd105a0077534d5aeda5aa4d84020679b8919af277e5dc8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"
}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://587f5a8034aff6561fd86cae79fc1a01349e286888d14a7155c71b42432f8d0d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.703561 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2ad4e9-8afb-40de-8cdd-cd585eb69ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1732d9217ab79af1899e8b3587ca102382ec951f071fc4338f60a882ec4fdbd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de59103c9a4af3ddc28eff0fbe9bcde9b0d2e85b5fb1e0df03ba550656a784ab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cab8afd2e7dd2e262214358ec368c20a5047ca63c99e4d5a4b00fc1a42a3dc5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.720083 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ee5480f9-5e52-428b-9a1b-f49689d99ab4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:41:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T10:42:05Z\\\",\\\"message\\\":\\\"file observer\\\\nW0129 10:42:04.266039 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 10:42:04.266138 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 10:42:04.267073 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-987928893/tls.crt::/tmp/serving-cert-987928893/tls.key\\\\\\\"\\\\nI0129 10:42:05.648959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 10:42:05.661744 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 10:42:05.661774 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 10:42:05.661797 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 10:42:05.661803 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 10:42:05.707295 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 10:42:05.707326 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707336 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 10:42:05.707345 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 10:42:05.707351 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 10:42:05.707358 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 10:42:05.707364 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 10:42:05.707617 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 10:42:05.733039 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:41:46Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T10:41:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T10:41:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:41:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.730611 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r27t7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c30fd-7234-4f76-8005-c86304d0d94a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe74b331b8d806d0f02125567e5ee2606ad491ea17693784c05c4f7bd85f0623\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T10:42:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hv6bz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:03Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r27t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.744354 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.755319 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.755354 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.755363 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.755378 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.755388 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.756373 4852 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d44fabb-f3c2-4492-9ab4-567a81928ccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T10:42:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cs2mw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T10:42:18Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bqdnv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T10:43:13Z is after 2025-08-24T17:21:41Z" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.860103 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.860178 4852 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.860194 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.860217 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.860238 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.963010 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.963360 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.963491 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.963667 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:13 crc kubenswrapper[4852]: I0129 10:43:13.963807 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:13Z","lastTransitionTime":"2026-01-29T10:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.066511 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.066548 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.066557 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.066572 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.066612 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.170537 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.170607 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.170631 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.170652 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.170669 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.273368 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.273412 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.273536 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.273561 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.273599 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.377156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.377802 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.377856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.377925 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.377946 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.480542 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.480609 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.480625 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.480644 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.480655 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.583392 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.583428 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.583439 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.583456 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.583466 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.685976 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 10:51:29.326522114 +0000 UTC Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.686465 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.686509 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.686518 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.686532 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.686542 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.788925 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.788952 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.788960 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.788972 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.788981 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.892416 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.892474 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.892492 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.892516 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.892536 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.996205 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.996266 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.996283 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.996308 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:14 crc kubenswrapper[4852]: I0129 10:43:14.996324 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:14Z","lastTransitionTime":"2026-01-29T10:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.099626 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.099687 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.099705 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.099732 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.099750 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.202480 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.202524 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.202533 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.202547 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.202556 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.305281 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.305316 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.305323 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.305336 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.305345 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.408122 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.408178 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.408191 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.408209 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.408220 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.463802 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.463882 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:15 crc kubenswrapper[4852]: E0129 10:43:15.463937 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.463964 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.463996 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:15 crc kubenswrapper[4852]: E0129 10:43:15.464103 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:15 crc kubenswrapper[4852]: E0129 10:43:15.464178 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:15 crc kubenswrapper[4852]: E0129 10:43:15.464248 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.464951 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:43:15 crc kubenswrapper[4852]: E0129 10:43:15.465134 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.510130 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.510201 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.510222 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.510259 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.510281 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.612929 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.613043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.613059 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.613083 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.613102 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.686875 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 23:53:43.270193303 +0000 UTC Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.715663 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.715723 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.715739 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.715756 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.715767 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.818755 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.818796 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.818807 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.818822 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.818832 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.923760 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.923808 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.923821 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.923841 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:15 crc kubenswrapper[4852]: I0129 10:43:15.923855 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:15Z","lastTransitionTime":"2026-01-29T10:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.026470 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.026544 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.026557 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.026574 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.026607 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.129598 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.129657 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.129668 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.129684 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.129696 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.232385 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.232435 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.232456 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.232474 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.232487 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.335190 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.335265 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.335281 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.335296 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.335305 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.437887 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.437929 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.437941 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.437957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.437969 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.540129 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.540156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.540164 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.540176 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.540185 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.642498 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.642671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.642687 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.642706 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.642722 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.687905 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 10:21:20.784186062 +0000 UTC Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.745150 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.745186 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.745197 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.745214 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.745227 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.848049 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.848116 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.848138 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.848166 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.848191 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.950960 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.950999 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.951027 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.951043 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:16 crc kubenswrapper[4852]: I0129 10:43:16.951054 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:16Z","lastTransitionTime":"2026-01-29T10:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.054699 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.054778 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.054797 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.054824 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.054843 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.157258 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.157297 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.157305 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.157318 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.157328 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.259997 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.260047 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.260056 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.260069 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.260078 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.362660 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.362731 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.362747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.362763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.362773 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.462613 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.462644 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.462681 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:17 crc kubenswrapper[4852]: E0129 10:43:17.462738 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.462759 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:17 crc kubenswrapper[4852]: E0129 10:43:17.462832 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:17 crc kubenswrapper[4852]: E0129 10:43:17.462987 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:17 crc kubenswrapper[4852]: E0129 10:43:17.463057 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.465050 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.465113 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.465136 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.465163 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.465185 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.567684 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.567747 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.567763 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.567787 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.567804 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.670606 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.670655 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.670664 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.670679 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.670690 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.688884 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 13:53:28.373099704 +0000 UTC Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.772713 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.772759 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.772769 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.772786 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.772796 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.875071 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.875132 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.875144 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.875163 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.875176 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.978669 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.978722 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.978744 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.978765 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:17 crc kubenswrapper[4852]: I0129 10:43:17.978777 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:17Z","lastTransitionTime":"2026-01-29T10:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.080831 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.080878 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.080888 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.080906 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.080917 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.183542 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.183745 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.183765 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.183790 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.183847 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.286562 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.286671 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.286719 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.286743 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.286767 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.389428 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.389497 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.389515 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.389532 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.389593 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.492623 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.492656 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.492666 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.492681 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.492691 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.595085 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.595128 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.595139 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.595156 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.595167 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.689856 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 00:56:31.652343076 +0000 UTC Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.697533 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.697563 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.697571 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.697609 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.697621 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.800504 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.800570 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.800603 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.800624 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.800635 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.903572 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.903629 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.903639 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.903657 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:18 crc kubenswrapper[4852]: I0129 10:43:18.903670 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:18Z","lastTransitionTime":"2026-01-29T10:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.006303 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.006352 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.006362 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.006379 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.006390 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.108404 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.108449 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.108460 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.108477 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.108490 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.211693 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.211933 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.211993 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.212015 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.212058 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.314279 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.314313 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.314321 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.314336 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.314347 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.417034 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.417072 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.417083 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.417100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.417110 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.462798 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.462855 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.462895 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.462911 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:19 crc kubenswrapper[4852]: E0129 10:43:19.462988 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:19 crc kubenswrapper[4852]: E0129 10:43:19.463067 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:19 crc kubenswrapper[4852]: E0129 10:43:19.463155 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:19 crc kubenswrapper[4852]: E0129 10:43:19.463230 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.519927 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.520359 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.520378 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.520396 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.520433 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.622990 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.623037 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.623051 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.623067 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.623078 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.690176 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 10:35:23.43105913 +0000 UTC Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.725638 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.725677 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.725687 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.725700 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.725710 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.827887 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.827968 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.827980 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.827996 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.828009 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.931306 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.931363 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.931372 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.931388 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:19 crc kubenswrapper[4852]: I0129 10:43:19.931397 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:19Z","lastTransitionTime":"2026-01-29T10:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.033427 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.033471 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.033482 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.033497 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.033507 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.137695 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.137742 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.137753 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.137770 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.137782 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.240545 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.240607 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.240617 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.240635 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.240648 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.346751 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.346793 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.346803 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.346820 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.346831 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.448994 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.449077 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.449091 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.449105 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.449115 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.551573 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.551626 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.551633 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.551646 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.551655 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.654364 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.654402 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.654411 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.654425 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.654434 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.690959 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 08:36:04.521330152 +0000 UTC Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.756910 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.756936 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.756944 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.756957 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.756966 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.859746 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.859856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.859876 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.859958 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.860732 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.964063 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.964142 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.964153 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.964239 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:20 crc kubenswrapper[4852]: I0129 10:43:20.964508 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:20Z","lastTransitionTime":"2026-01-29T10:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.067497 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.067564 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.067645 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.067673 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.067688 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:21Z","lastTransitionTime":"2026-01-29T10:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.171100 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.171150 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.171171 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.171196 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.171213 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:21Z","lastTransitionTime":"2026-01-29T10:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.274856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.274910 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.274932 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.274964 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.275070 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:21Z","lastTransitionTime":"2026-01-29T10:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.377928 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.378000 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.378025 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.378059 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.378087 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:21Z","lastTransitionTime":"2026-01-29T10:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.463312 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.463351 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.463383 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.463333 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:21 crc kubenswrapper[4852]: E0129 10:43:21.463493 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:21 crc kubenswrapper[4852]: E0129 10:43:21.463714 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:21 crc kubenswrapper[4852]: E0129 10:43:21.463878 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:21 crc kubenswrapper[4852]: E0129 10:43:21.463969 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.481345 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.481399 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.481416 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.481439 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.481457 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:21Z","lastTransitionTime":"2026-01-29T10:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.499810 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.499856 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.499868 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.499885 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.499896 4852 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T10:43:21Z","lastTransitionTime":"2026-01-29T10:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.545216 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt"] Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.545615 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.547935 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.548115 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.548196 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.549196 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.587063 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=29.587040695 podStartE2EDuration="29.587040695s" podCreationTimestamp="2026-01-29 10:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.58683086 +0000 UTC m=+98.804161984" watchObservedRunningTime="2026-01-29 10:43:21.587040695 +0000 UTC m=+98.804371839" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.600174 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/915faca0-72bd-4b32-95cc-cd5226725b05-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.600217 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/915faca0-72bd-4b32-95cc-cd5226725b05-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.600270 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/915faca0-72bd-4b32-95cc-cd5226725b05-service-ca\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.600310 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/915faca0-72bd-4b32-95cc-cd5226725b05-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.600400 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/915faca0-72bd-4b32-95cc-cd5226725b05-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.617747 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=10.617729005 podStartE2EDuration="10.617729005s" podCreationTimestamp="2026-01-29 10:43:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.616850284 +0000 UTC m=+98.834181498" watchObservedRunningTime="2026-01-29 10:43:21.617729005 +0000 UTC m=+98.835060129" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.671894 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-6grc8" podStartSLOduration=78.671876322 podStartE2EDuration="1m18.671876322s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.660346073 +0000 UTC m=+98.877677207" watchObservedRunningTime="2026-01-29 10:43:21.671876322 +0000 UTC m=+98.889207456" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.692067 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 20:22:53.059607971 +0000 UTC Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.692358 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.700830 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podStartSLOduration=78.700805129 podStartE2EDuration="1m18.700805129s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.683777888 +0000 UTC m=+98.901109022" watchObservedRunningTime="2026-01-29 10:43:21.700805129 +0000 UTC m=+98.918136273" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701473 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/915faca0-72bd-4b32-95cc-cd5226725b05-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701556 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/915faca0-72bd-4b32-95cc-cd5226725b05-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701606 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/915faca0-72bd-4b32-95cc-cd5226725b05-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701626 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/915faca0-72bd-4b32-95cc-cd5226725b05-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701667 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/915faca0-72bd-4b32-95cc-cd5226725b05-service-ca\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701710 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/915faca0-72bd-4b32-95cc-cd5226725b05-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.701812 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/915faca0-72bd-4b32-95cc-cd5226725b05-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.702531 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/915faca0-72bd-4b32-95cc-cd5226725b05-service-ca\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: 
\"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.704830 4852 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.709001 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/915faca0-72bd-4b32-95cc-cd5226725b05-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.730144 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-mclx7" podStartSLOduration=78.730129116 podStartE2EDuration="1m18.730129116s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.701140178 +0000 UTC m=+98.918471322" watchObservedRunningTime="2026-01-29 10:43:21.730129116 +0000 UTC m=+98.947460250" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.730342 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/915faca0-72bd-4b32-95cc-cd5226725b05-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-h6znt\" (UID: \"915faca0-72bd-4b32-95cc-cd5226725b05\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.730511 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-sd8vh" podStartSLOduration=78.730508025 podStartE2EDuration="1m18.730508025s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.72987639 +0000 UTC m=+98.947207524" watchObservedRunningTime="2026-01-29 10:43:21.730508025 +0000 UTC m=+98.947839159" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.799851 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=50.799836078 podStartE2EDuration="50.799836078s" podCreationTimestamp="2026-01-29 10:42:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.799363177 +0000 UTC m=+99.016694331" watchObservedRunningTime="2026-01-29 10:43:21.799836078 +0000 UTC m=+99.017167212" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.813436 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=77.813420116 podStartE2EDuration="1m17.813420116s" podCreationTimestamp="2026-01-29 10:42:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.812558734 +0000 UTC m=+99.029889948" watchObservedRunningTime="2026-01-29 10:43:21.813420116 +0000 UTC m=+99.030751250" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.831031 4852 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=75.831006619 podStartE2EDuration="1m15.831006619s" podCreationTimestamp="2026-01-29 10:42:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.830005976 +0000 UTC m=+99.047337110" watchObservedRunningTime="2026-01-29 10:43:21.831006619 +0000 UTC m=+99.048337773" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.842798 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-r27t7" podStartSLOduration=78.842781814 podStartE2EDuration="1m18.842781814s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.840972539 +0000 UTC m=+99.058303673" watchObservedRunningTime="2026-01-29 10:43:21.842781814 +0000 UTC m=+99.060112948" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.877968 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-msg8v" podStartSLOduration=78.877951852 podStartE2EDuration="1m18.877951852s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:21.87539356 +0000 UTC m=+99.092724714" watchObservedRunningTime="2026-01-29 10:43:21.877951852 +0000 UTC m=+99.095282986" Jan 29 10:43:21 crc kubenswrapper[4852]: I0129 10:43:21.879538 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" Jan 29 10:43:22 crc kubenswrapper[4852]: I0129 10:43:22.041327 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" event={"ID":"915faca0-72bd-4b32-95cc-cd5226725b05","Type":"ContainerStarted","Data":"cb076dbf9b995877306a20a4d7fad15415f08095d5a9501affb940cf8c139866"} Jan 29 10:43:22 crc kubenswrapper[4852]: I0129 10:43:22.104314 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:22 crc kubenswrapper[4852]: E0129 10:43:22.104424 4852 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:43:22 crc kubenswrapper[4852]: E0129 10:43:22.104482 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs podName:2d44fabb-f3c2-4492-9ab4-567a81928ccc nodeName:}" failed. No retries permitted until 2026-01-29 10:44:26.104464206 +0000 UTC m=+163.321795340 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs") pod "network-metrics-daemon-bqdnv" (UID: "2d44fabb-f3c2-4492-9ab4-567a81928ccc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 10:43:23 crc kubenswrapper[4852]: I0129 10:43:23.047770 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" event={"ID":"915faca0-72bd-4b32-95cc-cd5226725b05","Type":"ContainerStarted","Data":"c4fffd6323cc5f649db02ac889f9b0cf706ad2cee391a464a94459d45ac29a66"} Jan 29 10:43:23 crc kubenswrapper[4852]: I0129 10:43:23.070524 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-h6znt" podStartSLOduration=80.070187338 podStartE2EDuration="1m20.070187338s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:23.066375357 +0000 UTC m=+100.283706521" watchObservedRunningTime="2026-01-29 10:43:23.070187338 +0000 UTC m=+100.287518472" Jan 29 10:43:23 crc kubenswrapper[4852]: I0129 10:43:23.463125 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:23 crc kubenswrapper[4852]: E0129 10:43:23.465228 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:23 crc kubenswrapper[4852]: I0129 10:43:23.465300 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:23 crc kubenswrapper[4852]: I0129 10:43:23.465364 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:23 crc kubenswrapper[4852]: I0129 10:43:23.465418 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:23 crc kubenswrapper[4852]: E0129 10:43:23.465632 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:23 crc kubenswrapper[4852]: E0129 10:43:23.465734 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:23 crc kubenswrapper[4852]: E0129 10:43:23.465838 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:25 crc kubenswrapper[4852]: I0129 10:43:25.462937 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:25 crc kubenswrapper[4852]: I0129 10:43:25.463000 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:25 crc kubenswrapper[4852]: I0129 10:43:25.463146 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:25 crc kubenswrapper[4852]: I0129 10:43:25.463267 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:25 crc kubenswrapper[4852]: E0129 10:43:25.463755 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:25 crc kubenswrapper[4852]: E0129 10:43:25.463823 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:25 crc kubenswrapper[4852]: E0129 10:43:25.464002 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:25 crc kubenswrapper[4852]: E0129 10:43:25.464231 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:27 crc kubenswrapper[4852]: I0129 10:43:27.462495 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:27 crc kubenswrapper[4852]: I0129 10:43:27.462529 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:27 crc kubenswrapper[4852]: E0129 10:43:27.462620 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:27 crc kubenswrapper[4852]: I0129 10:43:27.462700 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:27 crc kubenswrapper[4852]: I0129 10:43:27.462828 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:27 crc kubenswrapper[4852]: E0129 10:43:27.462879 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:27 crc kubenswrapper[4852]: E0129 10:43:27.463058 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:27 crc kubenswrapper[4852]: I0129 10:43:27.463202 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:43:27 crc kubenswrapper[4852]: E0129 10:43:27.463284 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:27 crc kubenswrapper[4852]: E0129 10:43:27.463364 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-22xhj_openshift-ovn-kubernetes(2e44156c-fa1a-4edf-a317-e63b96f7aae4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" Jan 29 10:43:29 crc kubenswrapper[4852]: I0129 10:43:29.463390 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:29 crc kubenswrapper[4852]: I0129 10:43:29.463434 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:29 crc kubenswrapper[4852]: I0129 10:43:29.463482 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:29 crc kubenswrapper[4852]: E0129 10:43:29.463567 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:29 crc kubenswrapper[4852]: I0129 10:43:29.463769 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:29 crc kubenswrapper[4852]: E0129 10:43:29.463861 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:29 crc kubenswrapper[4852]: E0129 10:43:29.463943 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:29 crc kubenswrapper[4852]: E0129 10:43:29.464038 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:31 crc kubenswrapper[4852]: I0129 10:43:31.463026 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:31 crc kubenswrapper[4852]: I0129 10:43:31.463061 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:31 crc kubenswrapper[4852]: I0129 10:43:31.463121 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:31 crc kubenswrapper[4852]: E0129 10:43:31.463167 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:31 crc kubenswrapper[4852]: I0129 10:43:31.463194 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:31 crc kubenswrapper[4852]: E0129 10:43:31.463270 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:31 crc kubenswrapper[4852]: E0129 10:43:31.463428 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:31 crc kubenswrapper[4852]: E0129 10:43:31.463491 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:33 crc kubenswrapper[4852]: I0129 10:43:33.463105 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:33 crc kubenswrapper[4852]: I0129 10:43:33.463156 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:33 crc kubenswrapper[4852]: I0129 10:43:33.463045 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:33 crc kubenswrapper[4852]: I0129 10:43:33.463107 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:33 crc kubenswrapper[4852]: E0129 10:43:33.464424 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:33 crc kubenswrapper[4852]: E0129 10:43:33.464501 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:33 crc kubenswrapper[4852]: E0129 10:43:33.464616 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:33 crc kubenswrapper[4852]: E0129 10:43:33.464683 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:35 crc kubenswrapper[4852]: I0129 10:43:35.463129 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:35 crc kubenswrapper[4852]: I0129 10:43:35.463172 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:35 crc kubenswrapper[4852]: E0129 10:43:35.463301 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:35 crc kubenswrapper[4852]: I0129 10:43:35.463653 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:35 crc kubenswrapper[4852]: E0129 10:43:35.463882 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:35 crc kubenswrapper[4852]: I0129 10:43:35.463988 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:35 crc kubenswrapper[4852]: E0129 10:43:35.464087 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:35 crc kubenswrapper[4852]: E0129 10:43:35.464231 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:37 crc kubenswrapper[4852]: I0129 10:43:37.462519 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:37 crc kubenswrapper[4852]: I0129 10:43:37.462521 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:37 crc kubenswrapper[4852]: E0129 10:43:37.463156 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:37 crc kubenswrapper[4852]: I0129 10:43:37.462564 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:37 crc kubenswrapper[4852]: I0129 10:43:37.462551 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:37 crc kubenswrapper[4852]: E0129 10:43:37.463303 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:37 crc kubenswrapper[4852]: E0129 10:43:37.463515 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:37 crc kubenswrapper[4852]: E0129 10:43:37.463704 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:39 crc kubenswrapper[4852]: I0129 10:43:39.462975 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:39 crc kubenswrapper[4852]: I0129 10:43:39.463081 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:39 crc kubenswrapper[4852]: I0129 10:43:39.463122 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:39 crc kubenswrapper[4852]: I0129 10:43:39.463092 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:39 crc kubenswrapper[4852]: E0129 10:43:39.463227 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:39 crc kubenswrapper[4852]: E0129 10:43:39.463407 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:39 crc kubenswrapper[4852]: E0129 10:43:39.463573 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:39 crc kubenswrapper[4852]: E0129 10:43:39.463771 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:40 crc kubenswrapper[4852]: I0129 10:43:40.464141 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.111697 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/3.log" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.114854 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerStarted","Data":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.115337 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.116852 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/1.log" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.117447 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/0.log" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.117521 4852 generic.go:334] "Generic (PLEG): container finished" podID="80701ea9-a994-4a9f-8291-e3e40decfeda" containerID="ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1" exitCode=1 Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.117568 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerDied","Data":"ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1"} Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.117663 4852 scope.go:117] "RemoveContainer" containerID="95533d8d0b1cf79a386dbe48d744ae0217e7c0cd688640d2c5888974bc307460" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.118355 4852 scope.go:117] "RemoveContainer" containerID="ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1" Jan 29 10:43:41 crc kubenswrapper[4852]: E0129 10:43:41.118710 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-sd8vh_openshift-multus(80701ea9-a994-4a9f-8291-e3e40decfeda)\"" pod="openshift-multus/multus-sd8vh" podUID="80701ea9-a994-4a9f-8291-e3e40decfeda" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.162506 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podStartSLOduration=98.162485915 podStartE2EDuration="1m38.162485915s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:43:41.160719643 +0000 UTC m=+118.378050827" watchObservedRunningTime="2026-01-29 10:43:41.162485915 +0000 UTC m=+118.379817069" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.349070 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bqdnv"] Jan 29 10:43:41 crc kubenswrapper[4852]: 
I0129 10:43:41.349171 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:41 crc kubenswrapper[4852]: E0129 10:43:41.349241 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.463206 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.463230 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:41 crc kubenswrapper[4852]: E0129 10:43:41.463339 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:41 crc kubenswrapper[4852]: I0129 10:43:41.463379 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:41 crc kubenswrapper[4852]: E0129 10:43:41.463479 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:41 crc kubenswrapper[4852]: E0129 10:43:41.463546 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:42 crc kubenswrapper[4852]: I0129 10:43:42.123215 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/1.log" Jan 29 10:43:43 crc kubenswrapper[4852]: E0129 10:43:43.434241 4852 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 29 10:43:43 crc kubenswrapper[4852]: I0129 10:43:43.463223 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:43 crc kubenswrapper[4852]: I0129 10:43:43.463232 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:43 crc kubenswrapper[4852]: I0129 10:43:43.463237 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:43 crc kubenswrapper[4852]: I0129 10:43:43.463297 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:43 crc kubenswrapper[4852]: E0129 10:43:43.464100 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:43 crc kubenswrapper[4852]: E0129 10:43:43.464257 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:43 crc kubenswrapper[4852]: E0129 10:43:43.464298 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:43 crc kubenswrapper[4852]: E0129 10:43:43.464354 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:43 crc kubenswrapper[4852]: E0129 10:43:43.558694 4852 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 10:43:45 crc kubenswrapper[4852]: I0129 10:43:45.463349 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:45 crc kubenswrapper[4852]: I0129 10:43:45.463468 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:45 crc kubenswrapper[4852]: E0129 10:43:45.463495 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:45 crc kubenswrapper[4852]: I0129 10:43:45.463349 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:45 crc kubenswrapper[4852]: E0129 10:43:45.463847 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:45 crc kubenswrapper[4852]: E0129 10:43:45.464318 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:45 crc kubenswrapper[4852]: I0129 10:43:45.465101 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:45 crc kubenswrapper[4852]: E0129 10:43:45.465410 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:47 crc kubenswrapper[4852]: I0129 10:43:47.462730 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:47 crc kubenswrapper[4852]: I0129 10:43:47.462772 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:47 crc kubenswrapper[4852]: I0129 10:43:47.462877 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:47 crc kubenswrapper[4852]: E0129 10:43:47.463001 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:47 crc kubenswrapper[4852]: I0129 10:43:47.463233 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:47 crc kubenswrapper[4852]: E0129 10:43:47.463296 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:47 crc kubenswrapper[4852]: E0129 10:43:47.463446 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:47 crc kubenswrapper[4852]: E0129 10:43:47.463617 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:48 crc kubenswrapper[4852]: E0129 10:43:48.560528 4852 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 10:43:49 crc kubenswrapper[4852]: I0129 10:43:49.462769 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:49 crc kubenswrapper[4852]: I0129 10:43:49.462821 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:49 crc kubenswrapper[4852]: I0129 10:43:49.462821 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:49 crc kubenswrapper[4852]: I0129 10:43:49.462938 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:49 crc kubenswrapper[4852]: E0129 10:43:49.462940 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:49 crc kubenswrapper[4852]: E0129 10:43:49.463036 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:49 crc kubenswrapper[4852]: E0129 10:43:49.463099 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:49 crc kubenswrapper[4852]: E0129 10:43:49.463139 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:51 crc kubenswrapper[4852]: I0129 10:43:51.462771 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:51 crc kubenswrapper[4852]: I0129 10:43:51.462789 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:51 crc kubenswrapper[4852]: I0129 10:43:51.462944 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:51 crc kubenswrapper[4852]: I0129 10:43:51.463200 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:51 crc kubenswrapper[4852]: E0129 10:43:51.463247 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:51 crc kubenswrapper[4852]: E0129 10:43:51.463408 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:51 crc kubenswrapper[4852]: E0129 10:43:51.463541 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:51 crc kubenswrapper[4852]: E0129 10:43:51.463146 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:52 crc kubenswrapper[4852]: I0129 10:43:52.463258 4852 scope.go:117] "RemoveContainer" containerID="ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1" Jan 29 10:43:53 crc kubenswrapper[4852]: I0129 10:43:53.169794 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/1.log" Jan 29 10:43:53 crc kubenswrapper[4852]: I0129 10:43:53.170122 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerStarted","Data":"4641556984f4546183c797a0a6201212a965b94e1e22438c8943b95acf3d17ec"} Jan 29 10:43:53 crc kubenswrapper[4852]: I0129 10:43:53.462916 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:53 crc kubenswrapper[4852]: E0129 10:43:53.463091 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:53 crc kubenswrapper[4852]: I0129 10:43:53.463852 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:53 crc kubenswrapper[4852]: I0129 10:43:53.464402 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:53 crc kubenswrapper[4852]: I0129 10:43:53.464483 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:53 crc kubenswrapper[4852]: E0129 10:43:53.464399 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:53 crc kubenswrapper[4852]: E0129 10:43:53.464558 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:53 crc kubenswrapper[4852]: E0129 10:43:53.464637 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:53 crc kubenswrapper[4852]: E0129 10:43:53.561715 4852 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 10:43:55 crc kubenswrapper[4852]: I0129 10:43:55.462916 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:55 crc kubenswrapper[4852]: E0129 10:43:55.463889 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:55 crc kubenswrapper[4852]: I0129 10:43:55.464126 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:55 crc kubenswrapper[4852]: E0129 10:43:55.464296 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:55 crc kubenswrapper[4852]: I0129 10:43:55.464341 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:55 crc kubenswrapper[4852]: E0129 10:43:55.464521 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:55 crc kubenswrapper[4852]: I0129 10:43:55.464839 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:55 crc kubenswrapper[4852]: E0129 10:43:55.465155 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:57 crc kubenswrapper[4852]: I0129 10:43:57.462512 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:57 crc kubenswrapper[4852]: I0129 10:43:57.462619 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:57 crc kubenswrapper[4852]: E0129 10:43:57.462755 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 10:43:57 crc kubenswrapper[4852]: I0129 10:43:57.462790 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:57 crc kubenswrapper[4852]: E0129 10:43:57.462933 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 10:43:57 crc kubenswrapper[4852]: E0129 10:43:57.463055 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 10:43:57 crc kubenswrapper[4852]: I0129 10:43:57.463712 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:57 crc kubenswrapper[4852]: E0129 10:43:57.463879 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bqdnv" podUID="2d44fabb-f3c2-4492-9ab4-567a81928ccc" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.463295 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.463295 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.463333 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.463698 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.465454 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.465866 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.466002 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.466143 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.466484 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 10:43:59 crc kubenswrapper[4852]: I0129 10:43:59.466871 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.489474 4852 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.523259 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.524016 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-cbgpk"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.524708 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-84ffc"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.524809 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.525191 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.525694 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.528507 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.530381 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h7cn5"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.533270 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.536086 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.537936 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.537999 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.538194 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.538239 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.538302 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.538374 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.543061 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.544024 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.554808 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gz49m"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.562388 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.562761 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.562980 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.563237 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.563310 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.563945 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564262 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564347 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564386 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564442 
4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564479 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564535 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564572 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564708 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564750 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.565263 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564786 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564821 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564845 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564871 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.564911 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.565762 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.566048 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.566400 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.566622 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-djxrn"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.566965 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.567293 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.569060 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-p46mf"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.569624 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.570363 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.571015 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4tqc\" (UniqueName: \"kubernetes.io/projected/a5ff2962-1c70-420c-9003-9339c14eca14-kube-api-access-x4tqc\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.571589 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ff2962-1c70-420c-9003-9339c14eca14-serving-cert\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.571733 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-encryption-config\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.571853 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-audit\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.571974 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-config\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.572085 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-client-ca\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.572189 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-etcd-client\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.572298 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/50913b72-962f-4911-9d29-128636768457-node-pullsecrets\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.572426 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrkb8\" (UniqueName: \"kubernetes.io/projected/50913b72-962f-4911-9d29-128636768457-kube-api-access-qrkb8\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.572655 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/50913b72-962f-4911-9d29-128636768457-audit-dir\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.573291 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-config\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.573469 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-trusted-ca-bundle\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.573575 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-image-import-ca\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.573720 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-serving-cert\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.573840 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-etcd-serving-ca\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc 
kubenswrapper[4852]: I0129 10:44:02.577348 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.577362 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.577830 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.577953 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578027 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578092 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578230 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578257 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578314 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578394 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578431 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578438 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578466 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578525 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578568 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578678 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578763 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578890 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578901 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 29 10:44:02 crc 
kubenswrapper[4852]: I0129 10:44:02.578777 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.578832 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.579037 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.579062 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.580631 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.580722 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.580820 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582360 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582468 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582548 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582642 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582710 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582783 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.582908 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.583121 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.583219 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.583359 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.583486 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.584762 4852 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.584966 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.585018 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.585118 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.585214 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.585352 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.585495 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.585646 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.586392 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.587510 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-sqjq7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.587780 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.588240 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.588663 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.588893 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.589550 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.591615 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.615099 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.634934 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.635207 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-nmrj8"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.635659 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tf7qq"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.635992 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.636387 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638214 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638269 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638447 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638571 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638760 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638837 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.638961 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.639071 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.640294 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.640388 4852 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.640470 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.640739 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.640857 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zh2lg"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.641330 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.641610 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.641855 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.642014 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.643497 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.644155 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.644541 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-dcnm7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.644690 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.644827 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.645131 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.645727 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.646086 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.647438 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.647613 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.647726 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.649100 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vkn85"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.650615 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.650778 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.651210 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.651430 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.651491 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.651708 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.651966 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.652135 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.652498 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.653782 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.654391 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.655219 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.655796 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.661389 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-tg9p8"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.661786 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.663548 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.668357 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.670689 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.671427 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.671976 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.673338 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.673808 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674184 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-etcd-client\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674211 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/50913b72-962f-4911-9d29-128636768457-node-pullsecrets\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674244 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674259 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-service-ca-bundle\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674275 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/50913b72-962f-4911-9d29-128636768457-audit-dir\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674291 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrkb8\" (UniqueName: \"kubernetes.io/projected/50913b72-962f-4911-9d29-128636768457-kube-api-access-qrkb8\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674309 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d26nh\" (UniqueName: \"kubernetes.io/projected/9739b566-c925-4c9d-824f-94a8a5b9fb43-kube-api-access-d26nh\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674385 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9739b566-c925-4c9d-824f-94a8a5b9fb43-serving-cert\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc 
kubenswrapper[4852]: I0129 10:44:02.674429 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/50913b72-962f-4911-9d29-128636768457-audit-dir\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674534 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/50913b72-962f-4911-9d29-128636768457-node-pullsecrets\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674548 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-config\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674646 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-trusted-ca-bundle\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674772 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-image-import-ca\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674827 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-serving-cert\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674851 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-config\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674902 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-etcd-serving-ca\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674927 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ff2962-1c70-420c-9003-9339c14eca14-serving-cert\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674949 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4tqc\" (UniqueName: \"kubernetes.io/projected/a5ff2962-1c70-420c-9003-9339c14eca14-kube-api-access-x4tqc\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.674973 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-encryption-config\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675002 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-audit\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675024 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h64jk\" (UniqueName: \"kubernetes.io/projected/1cb28b3e-a772-4541-a845-34fd991c6162-kube-api-access-h64jk\") pod \"downloads-7954f5f757-sqjq7\" (UID: \"1cb28b3e-a772-4541-a845-34fd991c6162\") " pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675045 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-config\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675064 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-client-ca\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675362 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-config\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675876 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-client-ca\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675891 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-trusted-ca-bundle\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.675944 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.676124 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-image-import-ca\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.676369 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-etcd-serving-ca\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.676613 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.676839 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/50913b72-962f-4911-9d29-128636768457-audit\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.677992 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-config\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.678321 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.678751 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.679375 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.679617 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.680907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ff2962-1c70-420c-9003-9339c14eca14-serving-cert\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.681011 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.681846 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.683156 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qsvjm"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.683399 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-encryption-config\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.684107 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.684674 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.685212 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.685636 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.686021 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.686571 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.687665 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.687883 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.688496 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.688570 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b2ghs"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.690600 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.692131 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-27n6n"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.695904 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-serving-cert\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.698423 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50913b72-962f-4911-9d29-128636768457-etcd-client\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.705593 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.706390 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.707181 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-p46mf"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.707205 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-f5k22"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.707605 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-nmrj8"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.707623 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gz49m"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.707690 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.707864 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.708047 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.709810 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-cbgpk"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.710967 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h7cn5"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.712384 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-84ffc"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.713659 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-sqjq7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.714931 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.716084 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-dcnm7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.717188 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zh2lg"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.718403 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.722078 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.723555 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-djxrn"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.729019 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.731814 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.733438 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.735878 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.737298 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.739053 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vkn85"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.740931 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd"] Jan 29 10:44:02 crc 
kubenswrapper[4852]: I0129 10:44:02.743631 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tf7qq"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.745149 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.746742 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.748145 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.748240 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.748965 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.750043 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.751049 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.752093 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.753177 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.754282 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b2ghs"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.755671 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.756980 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.757736 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-m8qlz"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.758729 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-27n6n"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.758864 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.759724 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qsvjm"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.760966 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.762176 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-m8qlz"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.763312 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-956dv"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.764268 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-956dv" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.764604 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-rx688"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.765110 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.765405 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-956dv"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.766559 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rx688"] Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.768233 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.775703 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.775731 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-service-ca-bundle\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.775760 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d26nh\" (UniqueName: \"kubernetes.io/projected/9739b566-c925-4c9d-824f-94a8a5b9fb43-kube-api-access-d26nh\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.775783 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9739b566-c925-4c9d-824f-94a8a5b9fb43-serving-cert\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.775815 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-config\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.775846 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h64jk\" (UniqueName: \"kubernetes.io/projected/1cb28b3e-a772-4541-a845-34fd991c6162-kube-api-access-h64jk\") pod \"downloads-7954f5f757-sqjq7\" (UID: \"1cb28b3e-a772-4541-a845-34fd991c6162\") " pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.777474 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.777981 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-config\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.778007 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9739b566-c925-4c9d-824f-94a8a5b9fb43-service-ca-bundle\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.780296 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9739b566-c925-4c9d-824f-94a8a5b9fb43-serving-cert\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.787723 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.809072 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.827692 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.851195 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.868451 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 29 
10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.887908 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.907812 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.928303 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.954136 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 29 10:44:02 crc kubenswrapper[4852]: I0129 10:44:02.967953 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.008751 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.028990 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.048514 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.068762 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.088546 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.107947 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.128996 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.148570 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.170217 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.208718 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.229276 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.248529 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.276124 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.288369 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.309002 4852 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.329064 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.349208 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.368666 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.387937 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.408310 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.427859 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.447850 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.467773 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.487894 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.508898 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.528952 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.548812 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.567985 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.588140 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.608453 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.628345 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.648996 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.669855 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 
10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.686853 4852 request.go:700] Waited for 1.012851604s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmco-proxy-tls&limit=500&resourceVersion=0 Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.688411 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.725749 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrkb8\" (UniqueName: \"kubernetes.io/projected/50913b72-962f-4911-9d29-128636768457-kube-api-access-qrkb8\") pod \"apiserver-76f77b778f-cbgpk\" (UID: \"50913b72-962f-4911-9d29-128636768457\") " pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.743147 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4tqc\" (UniqueName: \"kubernetes.io/projected/a5ff2962-1c70-420c-9003-9339c14eca14-kube-api-access-x4tqc\") pod \"route-controller-manager-6576b87f9c-k9fzz\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.748976 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.769382 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.773265 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.791910 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.813275 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.828613 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.848634 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.869364 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.888717 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.908420 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.929276 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.949502 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.968780 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 29 10:44:03 crc kubenswrapper[4852]: I0129 10:44:03.990799 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.011417 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.028745 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.043516 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.048080 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.067976 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.088704 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.110113 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.127883 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.152832 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.168180 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.188476 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.207163 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz"] Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.207936 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 29 10:44:04 crc kubenswrapper[4852]: W0129 10:44:04.214764 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5ff2962_1c70_420c_9003_9339c14eca14.slice/crio-c6c3682ad4c6ccead9c46b4767ee676725254ff66f21743d0663b3e92b8feab2 WatchSource:0}: Error finding container c6c3682ad4c6ccead9c46b4767ee676725254ff66f21743d0663b3e92b8feab2: Status 404 returned error can't find the container with id c6c3682ad4c6ccead9c46b4767ee676725254ff66f21743d0663b3e92b8feab2 Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.227930 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.236479 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-cbgpk"] Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.248421 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.269151 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.288549 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.308872 4852 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 10:44:04 crc kubenswrapper[4852]: W0129 10:44:04.312082 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50913b72_962f_4911_9d29_128636768457.slice/crio-8b44ebb89773f02bad70e476924545228be422f1009ad636555e30a544075922 WatchSource:0}: Error finding container 8b44ebb89773f02bad70e476924545228be422f1009ad636555e30a544075922: Status 404 returned error can't find the container with id 8b44ebb89773f02bad70e476924545228be422f1009ad636555e30a544075922 Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.328229 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.348738 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.368828 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.388343 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.411866 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.428452 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.450259 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.469574 4852 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.489017 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.509148 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.529244 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.548889 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.568783 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.588644 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.609314 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.628813 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" 
Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.667740 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h64jk\" (UniqueName: \"kubernetes.io/projected/1cb28b3e-a772-4541-a845-34fd991c6162-kube-api-access-h64jk\") pod \"downloads-7954f5f757-sqjq7\" (UID: \"1cb28b3e-a772-4541-a845-34fd991c6162\") " pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.692079 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d26nh\" (UniqueName: \"kubernetes.io/projected/9739b566-c925-4c9d-824f-94a8a5b9fb43-kube-api-access-d26nh\") pod \"authentication-operator-69f744f599-djxrn\" (UID: \"9739b566-c925-4c9d-824f-94a8a5b9fb43\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697109 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697177 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db75be5c-a8a8-469f-9725-4410fd41379d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697216 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxn9q\" (UniqueName: \"kubernetes.io/projected/db75be5c-a8a8-469f-9725-4410fd41379d-kube-api-access-wxn9q\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697256 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697291 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/771b2fec-5cc7-4ada-ae0b-c49346660f81-auth-proxy-config\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697320 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-audit-policies\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: 
\"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697387 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697417 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b23e6b1-8250-4065-b064-f6bde7189794-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697448 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snrt8\" (UniqueName: \"kubernetes.io/projected/c1fe605f-3fd0-43de-8b26-7ed80ef57b34-kube-api-access-snrt8\") pod \"dns-operator-744455d44c-zh2lg\" (UID: \"c1fe605f-3fd0-43de-8b26-7ed80ef57b34\") " pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db75be5c-a8a8-469f-9725-4410fd41379d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697532 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-etcd-client\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: E0129 10:44:04.697617 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.197596582 +0000 UTC m=+142.414927796 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697642 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj2fh\" (UniqueName: \"kubernetes.io/projected/78ccc2c7-2f1f-473e-b3a3-177458532c7e-kube-api-access-xj2fh\") pod \"cluster-samples-operator-665b6dd947-m5b55\" (UID: \"78ccc2c7-2f1f-473e-b3a3-177458532c7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697692 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c1fe605f-3fd0-43de-8b26-7ed80ef57b34-metrics-tls\") pod \"dns-operator-744455d44c-zh2lg\" (UID: \"c1fe605f-3fd0-43de-8b26-7ed80ef57b34\") " pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.697954 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-tls\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698060 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698127 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698167 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698223 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7glg\" (UniqueName: \"kubernetes.io/projected/43ee8959-7d50-4e2c-93d8-70de85191fc3-kube-api-access-p7glg\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698284 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d098239-2446-415a-ae17-a49c5730ce99-config\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698313 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-oauth-config\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698381 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-policies\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698404 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-client-ca\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698448 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q8t6\" (UniqueName: \"kubernetes.io/projected/0b23e6b1-8250-4065-b064-f6bde7189794-kube-api-access-7q8t6\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698485 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/014973b5-c724-49a0-ab5d-2a1a80328f4e-config\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698517 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698551 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpqdp\" (UniqueName: \"kubernetes.io/projected/8d098239-2446-415a-ae17-a49c5730ce99-kube-api-access-qpqdp\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: 
\"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698575 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-serving-cert\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698639 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-encryption-config\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698719 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-available-featuregates\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698767 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8r5x\" (UniqueName: \"kubernetes.io/projected/014973b5-c724-49a0-ab5d-2a1a80328f4e-kube-api-access-c8r5x\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698820 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/666b4f49-0941-4575-a0e0-dce9819cc7c1-trusted-ca\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698876 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/92571915-fd75-46d5-ad65-3a61037a42de-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698945 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/771b2fec-5cc7-4ada-ae0b-c49346660f81-machine-approver-tls\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698970 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-service-ca\") pod \"console-f9d7485db-dcnm7\" (UID: 
\"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.698999 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/014973b5-c724-49a0-ab5d-2a1a80328f4e-images\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699047 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-serving-cert\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699076 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rttwq\" (UniqueName: \"kubernetes.io/projected/bc753007-98cd-4b3b-ab70-6035482f7c5e-kube-api-access-rttwq\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699194 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-serving-cert\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699220 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666b4f49-0941-4575-a0e0-dce9819cc7c1-config\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699284 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-trusted-ca\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699320 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d098239-2446-415a-ae17-a49c5730ce99-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699423 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ca2c540-4ca7-4bac-aba9-c14d3da95319-audit-dir\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 
10:44:04.699556 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsglv\" (UniqueName: \"kubernetes.io/projected/a56c210f-0186-49e4-b21c-bf46c22ab3dd-kube-api-access-zsglv\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699731 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7qfb\" (UniqueName: \"kubernetes.io/projected/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-kube-api-access-q7qfb\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699801 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ee27437f-db20-4337-813d-aaa57c3a95d5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.699901 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b23e6b1-8250-4065-b064-f6bde7189794-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700012 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/78ccc2c7-2f1f-473e-b3a3-177458532c7e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-m5b55\" (UID: \"78ccc2c7-2f1f-473e-b3a3-177458532c7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700174 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-dir\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700272 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700333 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771b2fec-5cc7-4ada-ae0b-c49346660f81-config\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700365 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv2dt\" (UniqueName: \"kubernetes.io/projected/771b2fec-5cc7-4ada-ae0b-c49346660f81-kube-api-access-mv2dt\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700395 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700430 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7tvv\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-kube-api-access-x7tvv\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700462 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8416e561-d08a-4708-bf09-e76b1934bdbd-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700513 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8416e561-d08a-4708-bf09-e76b1934bdbd-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700616 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-certificates\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700679 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-oauth-serving-cert\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700725 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: 
\"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700755 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92571915-fd75-46d5-ad65-3a61037a42de-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700808 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700840 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ee27437f-db20-4337-813d-aaa57c3a95d5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700872 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-bound-sa-token\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700901 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/014973b5-c724-49a0-ab5d-2a1a80328f4e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.700930 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/666b4f49-0941-4575-a0e0-dce9819cc7c1-serving-cert\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.701352 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92571915-fd75-46d5-ad65-3a61037a42de-config\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.701407 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-error\") pod 
\"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.701827 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-config\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.701896 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702028 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702066 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702099 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/8416e561-d08a-4708-bf09-e76b1934bdbd-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702133 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcjgz\" (UniqueName: \"kubernetes.io/projected/8416e561-d08a-4708-bf09-e76b1934bdbd-kube-api-access-zcjgz\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702164 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ee8959-7d50-4e2c-93d8-70de85191fc3-serving-cert\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702194 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvpck\" 
(UniqueName: \"kubernetes.io/projected/6ca2c540-4ca7-4bac-aba9-c14d3da95319-kube-api-access-hvpck\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702225 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-config\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702282 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-trusted-ca-bundle\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.702319 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcqw4\" (UniqueName: \"kubernetes.io/projected/666b4f49-0941-4575-a0e0-dce9819cc7c1-kube-api-access-pcqw4\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.706852 4852 request.go:700] Waited for 1.506752574s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication-operator/pods/authentication-operator-69f744f599-djxrn Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.803507 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:04 crc kubenswrapper[4852]: E0129 10:44:04.803729 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.303701613 +0000 UTC m=+142.521032747 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.803945 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/21463f0e-ee0d-423e-915e-30895dab7f86-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.803969 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804012 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsglv\" (UniqueName: \"kubernetes.io/projected/a56c210f-0186-49e4-b21c-bf46c22ab3dd-kube-api-access-zsglv\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804036 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-dir\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804063 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804088 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771b2fec-5cc7-4ada-ae0b-c49346660f81-config\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804118 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw52q\" (UniqueName: \"kubernetes.io/projected/243c7172-ae00-4728-94bf-8ecd9217abbf-kube-api-access-mw52q\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804161 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8416e561-d08a-4708-bf09-e76b1934bdbd-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: 
\"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804185 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpmq2\" (UniqueName: \"kubernetes.io/projected/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-kube-api-access-xpmq2\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804217 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj4gr\" (UniqueName: \"kubernetes.io/projected/4845c49e-2ed9-4760-bdc8-6156ebcae154-kube-api-access-bj4gr\") pod \"migrator-59844c95c7-78gzm\" (UID: \"4845c49e-2ed9-4760-bdc8-6156ebcae154\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804236 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-certificates\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804254 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-srv-cert\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804271 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-csi-data-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804294 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804315 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21463f0e-ee0d-423e-915e-30895dab7f86-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804337 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c7172-ae00-4728-94bf-8ecd9217abbf-serving-cert\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc 
kubenswrapper[4852]: I0129 10:44:04.804363 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ee27437f-db20-4337-813d-aaa57c3a95d5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804386 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/014973b5-c724-49a0-ab5d-2a1a80328f4e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804403 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-default-certificate\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804420 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7xtp\" (UniqueName: \"kubernetes.io/projected/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-kube-api-access-x7xtp\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804446 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb797a24-aa56-4fff-b691-4c9adae18fc1-serving-cert\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804466 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-service-ca\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804484 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92571915-fd75-46d5-ad65-3a61037a42de-config\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb797a24-aa56-4fff-b691-4c9adae18fc1-config\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804529 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a317d59-47f5-449f-b11b-02949dc5daf1-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804552 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-plugins-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804596 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-config\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804622 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/778ed389-22c2-4030-a586-635e936ef180-webhook-cert\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804645 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ccvc\" (UniqueName: \"kubernetes.io/projected/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-kube-api-access-5ccvc\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804665 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-client\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804680 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcqw4\" (UniqueName: \"kubernetes.io/projected/666b4f49-0941-4575-a0e0-dce9819cc7c1-kube-api-access-pcqw4\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804696 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e29585-8b41-4ac1-94f2-38a45107f4b9-secret-volume\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804732 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804753 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db75be5c-a8a8-469f-9725-4410fd41379d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804777 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfmmr\" (UniqueName: \"kubernetes.io/projected/db443f33-63b4-4ba5-b7a2-1578e241f449-kube-api-access-sfmmr\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804800 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dj7lm\" (UniqueName: \"kubernetes.io/projected/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-kube-api-access-dj7lm\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804823 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-node-bootstrap-token\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.804849 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b23e6b1-8250-4065-b064-f6bde7189794-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.805955 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2e8e7480-243c-44cf-9d11-9583242f5e1f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806008 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db75be5c-a8a8-469f-9725-4410fd41379d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806046 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"etcd-client\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-etcd-client\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806081 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f1cc79d4-3d21-4aa4-a814-ed50648761f3-signing-key\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806157 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806185 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-metrics-tls\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806223 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfe3527-e820-4844-82ed-1cec248a4b61-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-vjgg4\" (UID: \"2bfe3527-e820-4844-82ed-1cec248a4b61\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806597 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-dir\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.807095 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8416e561-d08a-4708-bf09-e76b1934bdbd-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.807704 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-certificates\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.806259 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: E0129 10:44:04.808654 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.308625854 +0000 UTC m=+142.525956988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.808840 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92571915-fd75-46d5-ad65-3a61037a42de-config\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.809256 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ee27437f-db20-4337-813d-aaa57c3a95d5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.809466 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.810132 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.810502 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7glg\" (UniqueName: \"kubernetes.io/projected/43ee8959-7d50-4e2c-93d8-70de85191fc3-kube-api-access-p7glg\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.810912 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-proxy-tls\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.810918 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-config\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.811773 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-etcd-client\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.811918 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db75be5c-a8a8-469f-9725-4410fd41379d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.812435 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.812512 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-oauth-config\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.812563 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-certs\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.812753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771b2fec-5cc7-4ada-ae0b-c49346660f81-config\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.812906 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-client-ca\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.813210 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q8t6\" (UniqueName: \"kubernetes.io/projected/0b23e6b1-8250-4065-b064-f6bde7189794-kube-api-access-7q8t6\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.813403 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7984r\" (UniqueName: \"kubernetes.io/projected/fb797a24-aa56-4fff-b691-4c9adae18fc1-kube-api-access-7984r\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.813432 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.813569 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b23e6b1-8250-4065-b064-f6bde7189794-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.813732 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.814114 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-serving-cert\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.814156 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8r5x\" (UniqueName: \"kubernetes.io/projected/014973b5-c724-49a0-ab5d-2a1a80328f4e-kube-api-access-c8r5x\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.814196 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jbp5\" (UniqueName: \"kubernetes.io/projected/778ed389-22c2-4030-a586-635e936ef180-kube-api-access-6jbp5\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.814501 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: 
\"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.814618 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-client-ca\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.815142 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f1cc79d4-3d21-4aa4-a814-ed50648761f3-signing-cabundle\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.815374 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/771b2fec-5cc7-4ada-ae0b-c49346660f81-machine-approver-tls\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.815532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-service-ca\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.815700 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db75be5c-a8a8-469f-9725-4410fd41379d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.815747 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e1a1fdcb-490c-4679-ac26-bb447bf8e47c-cert\") pod \"ingress-canary-rx688\" (UID: \"e1a1fdcb-490c-4679-ac26-bb447bf8e47c\") " pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.815810 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/014973b5-c724-49a0-ab5d-2a1a80328f4e-images\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.816933 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.817033 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-serving-cert\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.817101 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2e8e7480-243c-44cf-9d11-9583242f5e1f-metrics-tls\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.817130 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6dcq\" (UniqueName: \"kubernetes.io/projected/016ce5cc-2121-40d8-8e66-0e6e416f64bd-kube-api-access-d6dcq\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.817169 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.818489 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-oauth-config\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.818923 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-trusted-ca\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.818438 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-service-ca\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.821645 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/771b2fec-5cc7-4ada-ae0b-c49346660f81-machine-approver-tls\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.822807 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/014973b5-c724-49a0-ab5d-2a1a80328f4e-images\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.823846 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d098239-2446-415a-ae17-a49c5730ce99-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.823954 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/016ce5cc-2121-40d8-8e66-0e6e416f64bd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824012 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/016ce5cc-2121-40d8-8e66-0e6e416f64bd-srv-cert\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824052 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ca2c540-4ca7-4bac-aba9-c14d3da95319-audit-dir\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824094 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7qfb\" (UniqueName: \"kubernetes.io/projected/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-kube-api-access-q7qfb\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824170 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e8e7480-243c-44cf-9d11-9583242f5e1f-trusted-ca\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824235 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-images\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824305 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nj4w\" (UniqueName: \"kubernetes.io/projected/f1cc79d4-3d21-4aa4-a814-ed50648761f3-kube-api-access-5nj4w\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 
10:44:04.824417 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ee27437f-db20-4337-813d-aaa57c3a95d5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824502 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b23e6b1-8250-4065-b064-f6bde7189794-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824540 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/78ccc2c7-2f1f-473e-b3a3-177458532c7e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-m5b55\" (UID: \"78ccc2c7-2f1f-473e-b3a3-177458532c7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824644 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv2dt\" (UniqueName: \"kubernetes.io/projected/771b2fec-5cc7-4ada-ae0b-c49346660f81-kube-api-access-mv2dt\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824674 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824753 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-ca\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824825 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7tvv\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-kube-api-access-x7tvv\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824907 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8416e561-d08a-4708-bf09-e76b1934bdbd-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.824970 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/53eb6e2d-e130-4ef0-8242-d429c1cf2be0-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-99tmr\" (UID: \"53eb6e2d-e130-4ef0-8242-d429c1cf2be0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825119 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-oauth-serving-cert\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825189 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e29585-8b41-4ac1-94f2-38a45107f4b9-config-volume\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825269 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825310 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92571915-fd75-46d5-ad65-3a61037a42de-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825347 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21463f0e-ee0d-423e-915e-30895dab7f86-config\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825386 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-trusted-ca\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825407 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825469 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.825568 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ca2c540-4ca7-4bac-aba9-c14d3da95319-audit-dir\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.827176 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-serving-cert\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828037 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-bound-sa-token\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828130 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/666b4f49-0941-4575-a0e0-dce9819cc7c1-serving-cert\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828383 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b23e6b1-8250-4065-b064-f6bde7189794-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828509 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828629 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-config\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828665 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gkdj\" (UniqueName: 
\"kubernetes.io/projected/996c9132-4f08-47d2-ad63-9ac2c3196e95-kube-api-access-4gkdj\") pod \"multus-admission-controller-857f4d67dd-qsvjm\" (UID: \"996c9132-4f08-47d2-ad63-9ac2c3196e95\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828721 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-registration-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828748 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.828771 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830126 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/014973b5-c724-49a0-ab5d-2a1a80328f4e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830193 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ee27437f-db20-4337-813d-aaa57c3a95d5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830192 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830235 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830701 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/8416e561-d08a-4708-bf09-e76b1934bdbd-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830737 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcjgz\" (UniqueName: \"kubernetes.io/projected/8416e561-d08a-4708-bf09-e76b1934bdbd-kube-api-access-zcjgz\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830783 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-config\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830820 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ee8959-7d50-4e2c-93d8-70de85191fc3-serving-cert\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830915 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvpck\" (UniqueName: \"kubernetes.io/projected/6ca2c540-4ca7-4bac-aba9-c14d3da95319-kube-api-access-hvpck\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830950 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-trusted-ca-bundle\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.830986 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfh82\" (UniqueName: \"kubernetes.io/projected/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-kube-api-access-tfh82\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831046 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/996c9132-4f08-47d2-ad63-9ac2c3196e95-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qsvjm\" (UID: \"996c9132-4f08-47d2-ad63-9ac2c3196e95\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831077 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/778ed389-22c2-4030-a586-635e936ef180-tmpfs\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831112 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxn9q\" (UniqueName: \"kubernetes.io/projected/db75be5c-a8a8-469f-9725-4410fd41379d-kube-api-access-wxn9q\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831240 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831280 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/771b2fec-5cc7-4ada-ae0b-c49346660f81-auth-proxy-config\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831314 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-audit-policies\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831348 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2k9q\" (UniqueName: \"kubernetes.io/projected/2e8e7480-243c-44cf-9d11-9583242f5e1f-kube-api-access-c2k9q\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831384 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7ktx\" (UniqueName: \"kubernetes.io/projected/4cde3fa3-1fde-45f6-891d-38f98485d443-kube-api-access-n7ktx\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831413 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831448 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snrt8\" (UniqueName: \"kubernetes.io/projected/c1fe605f-3fd0-43de-8b26-7ed80ef57b34-kube-api-access-snrt8\") pod \"dns-operator-744455d44c-zh2lg\" (UID: \"c1fe605f-3fd0-43de-8b26-7ed80ef57b34\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831478 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-metrics-certs\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831515 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj2fh\" (UniqueName: \"kubernetes.io/projected/78ccc2c7-2f1f-473e-b3a3-177458532c7e-kube-api-access-xj2fh\") pod \"cluster-samples-operator-665b6dd947-m5b55\" (UID: \"78ccc2c7-2f1f-473e-b3a3-177458532c7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831570 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c1fe605f-3fd0-43de-8b26-7ed80ef57b34-metrics-tls\") pod \"dns-operator-744455d44c-zh2lg\" (UID: \"c1fe605f-3fd0-43de-8b26-7ed80ef57b34\") " pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831620 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-socket-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.831791 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/666b4f49-0941-4575-a0e0-dce9819cc7c1-serving-cert\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833651 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833777 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-tls\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833808 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833830 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-mountpoint-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833877 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-proxy-tls\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833908 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d098239-2446-415a-ae17-a49c5730ce99-config\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833945 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a317d59-47f5-449f-b11b-02949dc5daf1-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.833972 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-policies\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834007 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/014973b5-c724-49a0-ab5d-2a1a80328f4e-config\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834097 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-serving-cert\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834207 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvb26\" (UniqueName: \"kubernetes.io/projected/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-kube-api-access-bvb26\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834341 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834381 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpqdp\" (UniqueName: \"kubernetes.io/projected/8d098239-2446-415a-ae17-a49c5730ce99-kube-api-access-qpqdp\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834409 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-encryption-config\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834435 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-available-featuregates\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834458 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/666b4f49-0941-4575-a0e0-dce9819cc7c1-trusted-ca\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834482 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/92571915-fd75-46d5-ad65-3a61037a42de-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnwd9\" (UniqueName: \"kubernetes.io/projected/e1a1fdcb-490c-4679-ac26-bb447bf8e47c-kube-api-access-wnwd9\") pod \"ingress-canary-rx688\" (UID: \"e1a1fdcb-490c-4679-ac26-bb447bf8e47c\") " pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834534 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-profile-collector-cert\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834559 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-stats-auth\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834631 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-serving-cert\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834655 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rttwq\" (UniqueName: \"kubernetes.io/projected/bc753007-98cd-4b3b-ab70-6035482f7c5e-kube-api-access-rttwq\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834684 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a317d59-47f5-449f-b11b-02949dc5daf1-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834783 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqrnk\" (UniqueName: \"kubernetes.io/projected/08e29585-8b41-4ac1-94f2-38a45107f4b9-kube-api-access-lqrnk\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834803 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skkjj\" (UniqueName: \"kubernetes.io/projected/53eb6e2d-e130-4ef0-8242-d429c1cf2be0-kube-api-access-skkjj\") pod \"control-plane-machine-set-operator-78cbb6b69f-99tmr\" (UID: \"53eb6e2d-e130-4ef0-8242-d429c1cf2be0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834835 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666b4f49-0941-4575-a0e0-dce9819cc7c1-config\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834856 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9xsj\" (UniqueName: \"kubernetes.io/projected/2bfe3527-e820-4844-82ed-1cec248a4b61-kube-api-access-s9xsj\") pod \"package-server-manager-789f6589d5-vjgg4\" (UID: \"2bfe3527-e820-4844-82ed-1cec248a4b61\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834878 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-service-ca-bundle\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834898 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-config-volume\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834918 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/778ed389-22c2-4030-a586-635e936ef180-apiservice-cert\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.834966 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/014973b5-c724-49a0-ab5d-2a1a80328f4e-config\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.835084 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-available-featuregates\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.835762 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666b4f49-0941-4575-a0e0-dce9819cc7c1-config\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.836417 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.837672 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ca2c540-4ca7-4bac-aba9-c14d3da95319-audit-policies\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.837866 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-oauth-serving-cert\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 
10:44:04.838199 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/666b4f49-0941-4575-a0e0-dce9819cc7c1-trusted-ca\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.838433 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/771b2fec-5cc7-4ada-ae0b-c49346660f81-auth-proxy-config\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.838897 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-trusted-ca-bundle\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.839033 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-config\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.839183 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d098239-2446-415a-ae17-a49c5730ce99-config\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.839419 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d098239-2446-415a-ae17-a49c5730ce99-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.839661 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-policies\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.840560 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-serving-cert\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.840960 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6ca2c540-4ca7-4bac-aba9-c14d3da95319-encryption-config\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.841057 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92571915-fd75-46d5-ad65-3a61037a42de-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.841782 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ee8959-7d50-4e2c-93d8-70de85191fc3-serving-cert\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.854224 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-tls\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.854365 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c1fe605f-3fd0-43de-8b26-7ed80ef57b34-metrics-tls\") pod \"dns-operator-744455d44c-zh2lg\" (UID: \"c1fe605f-3fd0-43de-8b26-7ed80ef57b34\") " pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.854835 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.855110 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.855293 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/8416e561-d08a-4708-bf09-e76b1934bdbd-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.855332 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.855408 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.855848 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.855926 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/78ccc2c7-2f1f-473e-b3a3-177458532c7e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-m5b55\" (UID: \"78ccc2c7-2f1f-473e-b3a3-177458532c7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.858190 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.858794 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcqw4\" (UniqueName: \"kubernetes.io/projected/666b4f49-0941-4575-a0e0-dce9819cc7c1-kube-api-access-pcqw4\") pod \"console-operator-58897d9998-nmrj8\" (UID: \"666b4f49-0941-4575-a0e0-dce9819cc7c1\") " pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.877810 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.880277 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7glg\" (UniqueName: \"kubernetes.io/projected/43ee8959-7d50-4e2c-93d8-70de85191fc3-kube-api-access-p7glg\") pod \"controller-manager-879f6c89f-h7cn5\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.881255 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsglv\" (UniqueName: \"kubernetes.io/projected/a56c210f-0186-49e4-b21c-bf46c22ab3dd-kube-api-access-zsglv\") pod \"oauth-openshift-558db77b4-gz49m\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.909986 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q8t6\" (UniqueName: \"kubernetes.io/projected/0b23e6b1-8250-4065-b064-f6bde7189794-kube-api-access-7q8t6\") pod \"kube-storage-version-migrator-operator-b67b599dd-pqjnd\" (UID: \"0b23e6b1-8250-4065-b064-f6bde7189794\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.927212 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8r5x\" (UniqueName: \"kubernetes.io/projected/014973b5-c724-49a0-ab5d-2a1a80328f4e-kube-api-access-c8r5x\") pod \"machine-api-operator-5694c8668f-84ffc\" (UID: \"014973b5-c724-49a0-ab5d-2a1a80328f4e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.935846 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:04 crc kubenswrapper[4852]: E0129 10:44:04.936027 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.435981649 +0000 UTC m=+142.653312803 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.936080 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-mountpoint-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.936131 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-proxy-tls\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.936181 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a317d59-47f5-449f-b11b-02949dc5daf1-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.936223 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvb26\" (UniqueName: \"kubernetes.io/projected/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-kube-api-access-bvb26\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.936270 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnwd9\" (UniqueName: \"kubernetes.io/projected/e1a1fdcb-490c-4679-ac26-bb447bf8e47c-kube-api-access-wnwd9\") pod \"ingress-canary-rx688\" (UID: \"e1a1fdcb-490c-4679-ac26-bb447bf8e47c\") " pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.936994 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-profile-collector-cert\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937034 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-stats-auth\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937085 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqrnk\" 
(UniqueName: \"kubernetes.io/projected/08e29585-8b41-4ac1-94f2-38a45107f4b9-kube-api-access-lqrnk\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937122 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skkjj\" (UniqueName: \"kubernetes.io/projected/53eb6e2d-e130-4ef0-8242-d429c1cf2be0-kube-api-access-skkjj\") pod \"control-plane-machine-set-operator-78cbb6b69f-99tmr\" (UID: \"53eb6e2d-e130-4ef0-8242-d429c1cf2be0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937167 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a317d59-47f5-449f-b11b-02949dc5daf1-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937200 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9xsj\" (UniqueName: \"kubernetes.io/projected/2bfe3527-e820-4844-82ed-1cec248a4b61-kube-api-access-s9xsj\") pod \"package-server-manager-789f6589d5-vjgg4\" (UID: \"2bfe3527-e820-4844-82ed-1cec248a4b61\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937233 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-service-ca-bundle\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937269 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-config-volume\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937297 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/778ed389-22c2-4030-a586-635e936ef180-apiservice-cert\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937333 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/21463f0e-ee0d-423e-915e-30895dab7f86-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937391 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw52q\" (UniqueName: \"kubernetes.io/projected/243c7172-ae00-4728-94bf-8ecd9217abbf-kube-api-access-mw52q\") pod 
\"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937426 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpmq2\" (UniqueName: \"kubernetes.io/projected/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-kube-api-access-xpmq2\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937455 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj4gr\" (UniqueName: \"kubernetes.io/projected/4845c49e-2ed9-4760-bdc8-6156ebcae154-kube-api-access-bj4gr\") pod \"migrator-59844c95c7-78gzm\" (UID: \"4845c49e-2ed9-4760-bdc8-6156ebcae154\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937484 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-csi-data-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937519 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-srv-cert\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937542 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c7172-ae00-4728-94bf-8ecd9217abbf-serving-cert\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937571 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21463f0e-ee0d-423e-915e-30895dab7f86-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937653 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-default-certificate\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937674 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7xtp\" (UniqueName: \"kubernetes.io/projected/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-kube-api-access-x7xtp\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937700 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-service-ca\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937733 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb797a24-aa56-4fff-b691-4c9adae18fc1-serving-cert\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937753 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a317d59-47f5-449f-b11b-02949dc5daf1-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937778 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb797a24-aa56-4fff-b691-4c9adae18fc1-config\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937836 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-plugins-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937872 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-client\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937907 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/778ed389-22c2-4030-a586-635e936ef180-webhook-cert\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937937 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ccvc\" (UniqueName: \"kubernetes.io/projected/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-kube-api-access-5ccvc\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.937970 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e29585-8b41-4ac1-94f2-38a45107f4b9-secret-volume\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938000 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dj7lm\" (UniqueName: \"kubernetes.io/projected/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-kube-api-access-dj7lm\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938033 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938056 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfmmr\" (UniqueName: \"kubernetes.io/projected/db443f33-63b4-4ba5-b7a2-1578e241f449-kube-api-access-sfmmr\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938083 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-node-bootstrap-token\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938105 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2e8e7480-243c-44cf-9d11-9583242f5e1f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938134 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f1cc79d4-3d21-4aa4-a814-ed50648761f3-signing-key\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938165 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-metrics-tls\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938200 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfe3527-e820-4844-82ed-1cec248a4b61-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-vjgg4\" (UID: \"2bfe3527-e820-4844-82ed-1cec248a4b61\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938250 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-proxy-tls\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938282 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-certs\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938309 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938332 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7984r\" (UniqueName: \"kubernetes.io/projected/fb797a24-aa56-4fff-b691-4c9adae18fc1-kube-api-access-7984r\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938356 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jbp5\" (UniqueName: \"kubernetes.io/projected/778ed389-22c2-4030-a586-635e936ef180-kube-api-access-6jbp5\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938376 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f1cc79d4-3d21-4aa4-a814-ed50648761f3-signing-cabundle\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938391 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e1a1fdcb-490c-4679-ac26-bb447bf8e47c-cert\") pod \"ingress-canary-rx688\" (UID: \"e1a1fdcb-490c-4679-ac26-bb447bf8e47c\") " pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938408 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6dcq\" (UniqueName: \"kubernetes.io/projected/016ce5cc-2121-40d8-8e66-0e6e416f64bd-kube-api-access-d6dcq\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938423 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: 
\"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938438 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2e8e7480-243c-44cf-9d11-9583242f5e1f-metrics-tls\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938454 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/016ce5cc-2121-40d8-8e66-0e6e416f64bd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938471 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/016ce5cc-2121-40d8-8e66-0e6e416f64bd-srv-cert\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938493 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e8e7480-243c-44cf-9d11-9583242f5e1f-trusted-ca\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938512 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-images\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938528 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nj4w\" (UniqueName: \"kubernetes.io/projected/f1cc79d4-3d21-4aa4-a814-ed50648761f3-kube-api-access-5nj4w\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938552 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-ca\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938615 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/53eb6e2d-e130-4ef0-8242-d429c1cf2be0-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-99tmr\" (UID: \"53eb6e2d-e130-4ef0-8242-d429c1cf2be0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938657 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e29585-8b41-4ac1-94f2-38a45107f4b9-config-volume\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938686 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938710 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21463f0e-ee0d-423e-915e-30895dab7f86-config\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938728 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-registration-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938803 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gkdj\" (UniqueName: \"kubernetes.io/projected/996c9132-4f08-47d2-ad63-9ac2c3196e95-kube-api-access-4gkdj\") pod \"multus-admission-controller-857f4d67dd-qsvjm\" (UID: \"996c9132-4f08-47d2-ad63-9ac2c3196e95\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938827 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-config\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938862 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfh82\" (UniqueName: \"kubernetes.io/projected/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-kube-api-access-tfh82\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938882 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/996c9132-4f08-47d2-ad63-9ac2c3196e95-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qsvjm\" (UID: \"996c9132-4f08-47d2-ad63-9ac2c3196e95\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938901 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/778ed389-22c2-4030-a586-635e936ef180-tmpfs\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938930 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2k9q\" (UniqueName: \"kubernetes.io/projected/2e8e7480-243c-44cf-9d11-9583242f5e1f-kube-api-access-c2k9q\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938948 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7ktx\" (UniqueName: \"kubernetes.io/projected/4cde3fa3-1fde-45f6-891d-38f98485d443-kube-api-access-n7ktx\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.938979 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-metrics-certs\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.939007 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-socket-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.939373 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-socket-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.939438 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-mountpoint-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.943290 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-csi-data-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.946462 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb797a24-aa56-4fff-b691-4c9adae18fc1-config\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.949526 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-plugins-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.951098 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-proxy-tls\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.951956 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e8e7480-243c-44cf-9d11-9583242f5e1f-trusted-ca\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.951998 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/db443f33-63b4-4ba5-b7a2-1578e241f449-registration-dir\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.952207 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-images\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.952729 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-ca\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.952736 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-service-ca-bundle\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.953286 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-config-volume\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.954767 4852 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-config\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.955866 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/778ed389-22c2-4030-a586-635e936ef180-apiservice-cert\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: E0129 10:44:04.956321 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.456306052 +0000 UTC m=+142.673637266 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.956472 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f1cc79d4-3d21-4aa4-a814-ed50648761f3-signing-key\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.957882 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.958478 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e29585-8b41-4ac1-94f2-38a45107f4b9-config-volume\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.958934 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f1cc79d4-3d21-4aa4-a814-ed50648761f3-signing-cabundle\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.960178 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/778ed389-22c2-4030-a586-635e936ef180-tmpfs\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 
10:44:04.960245 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/996c9132-4f08-47d2-ad63-9ac2c3196e95-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qsvjm\" (UID: \"996c9132-4f08-47d2-ad63-9ac2c3196e95\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.960265 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-service-ca\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.960946 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/53eb6e2d-e130-4ef0-8242-d429c1cf2be0-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-99tmr\" (UID: \"53eb6e2d-e130-4ef0-8242-d429c1cf2be0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.961450 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21463f0e-ee0d-423e-915e-30895dab7f86-config\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.961563 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/243c7172-ae00-4728-94bf-8ecd9217abbf-serving-cert\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962033 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-auth-proxy-config\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a317d59-47f5-449f-b11b-02949dc5daf1-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962528 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a317d59-47f5-449f-b11b-02949dc5daf1-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962574 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/2bfe3527-e820-4844-82ed-1cec248a4b61-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-vjgg4\" (UID: \"2bfe3527-e820-4844-82ed-1cec248a4b61\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962669 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-srv-cert\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962718 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-stats-auth\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.962694 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-profile-collector-cert\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.963240 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e29585-8b41-4ac1-94f2-38a45107f4b9-secret-volume\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.964227 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.964481 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21463f0e-ee0d-423e-915e-30895dab7f86-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.965448 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-metrics-tls\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.965491 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-proxy-tls\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:04 crc 
kubenswrapper[4852]: I0129 10:44:04.967541 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb797a24-aa56-4fff-b691-4c9adae18fc1-serving-cert\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.968023 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/016ce5cc-2121-40d8-8e66-0e6e416f64bd-srv-cert\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.968186 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/016ce5cc-2121-40d8-8e66-0e6e416f64bd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.969130 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-certs\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.970297 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.970979 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-node-bootstrap-token\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.971208 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7qfb\" (UniqueName: \"kubernetes.io/projected/4e5b1e5c-9af2-44be-8a29-6a597a5ea609-kube-api-access-q7qfb\") pod \"openshift-config-operator-7777fb866f-p46mf\" (UID: \"4e5b1e5c-9af2-44be-8a29-6a597a5ea609\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.971283 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2e8e7480-243c-44cf-9d11-9583242f5e1f-metrics-tls\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.971559 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e1a1fdcb-490c-4679-ac26-bb447bf8e47c-cert\") pod \"ingress-canary-rx688\" (UID: 
\"e1a1fdcb-490c-4679-ac26-bb447bf8e47c\") " pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.971651 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/243c7172-ae00-4728-94bf-8ecd9217abbf-etcd-client\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.972142 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-default-certificate\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.974371 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-metrics-certs\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.987125 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/778ed389-22c2-4030-a586-635e936ef180-webhook-cert\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.987698 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.988177 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:04 crc kubenswrapper[4852]: I0129 10:44:04.991089 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7tvv\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-kube-api-access-x7tvv\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.013941 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8416e561-d08a-4708-bf09-e76b1934bdbd-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.028020 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-bound-sa-token\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.039898 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.040247 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.540219184 +0000 UTC m=+142.757550308 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.040317 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.040407 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.040846 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:05.540831669 +0000 UTC m=+142.758162803 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.051973 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcjgz\" (UniqueName: \"kubernetes.io/projected/8416e561-d08a-4708-bf09-e76b1934bdbd-kube-api-access-zcjgz\") pod \"cluster-image-registry-operator-dc59b4c8b-f2qqd\" (UID: \"8416e561-d08a-4708-bf09-e76b1934bdbd\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.058162 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-sqjq7"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.069312 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv2dt\" (UniqueName: \"kubernetes.io/projected/771b2fec-5cc7-4ada-ae0b-c49346660f81-kube-api-access-mv2dt\") pod \"machine-approver-56656f9798-r2p84\" (UID: \"771b2fec-5cc7-4ada-ae0b-c49346660f81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.085996 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxn9q\" (UniqueName: \"kubernetes.io/projected/db75be5c-a8a8-469f-9725-4410fd41379d-kube-api-access-wxn9q\") pod \"openshift-controller-manager-operator-756b6f6bc6-vmqq6\" (UID: \"db75be5c-a8a8-469f-9725-4410fd41379d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.102902 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-nmrj8"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.106853 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/92571915-fd75-46d5-ad65-3a61037a42de-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p8nnl\" (UID: \"92571915-fd75-46d5-ad65-3a61037a42de\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.112976 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.120873 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.125753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpqdp\" (UniqueName: \"kubernetes.io/projected/8d098239-2446-415a-ae17-a49c5730ce99-kube-api-access-qpqdp\") pod \"openshift-apiserver-operator-796bbdcf4f-v8cv7\" (UID: \"8d098239-2446-415a-ae17-a49c5730ce99\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:05 crc kubenswrapper[4852]: W0129 10:44:05.126590 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod666b4f49_0941_4575_a0e0_dce9819cc7c1.slice/crio-c96b78ec48079c22041b7601781fd5a4818cf71f050233a5f522a43f4fde2ebb WatchSource:0}: Error finding container c96b78ec48079c22041b7601781fd5a4818cf71f050233a5f522a43f4fde2ebb: Status 404 returned error can't find the container with id c96b78ec48079c22041b7601781fd5a4818cf71f050233a5f522a43f4fde2ebb Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.141148 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.141556 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.641539436 +0000 UTC m=+142.858870570 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.142499 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.147120 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvpck\" (UniqueName: \"kubernetes.io/projected/6ca2c540-4ca7-4bac-aba9-c14d3da95319-kube-api-access-hvpck\") pod \"apiserver-7bbb656c7d-25c4d\" (UID: \"6ca2c540-4ca7-4bac-aba9-c14d3da95319\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.150400 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.164124 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snrt8\" (UniqueName: \"kubernetes.io/projected/c1fe605f-3fd0-43de-8b26-7ed80ef57b34-kube-api-access-snrt8\") pod \"dns-operator-744455d44c-zh2lg\" (UID: \"c1fe605f-3fd0-43de-8b26-7ed80ef57b34\") " pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.184035 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj2fh\" (UniqueName: \"kubernetes.io/projected/78ccc2c7-2f1f-473e-b3a3-177458532c7e-kube-api-access-xj2fh\") pod \"cluster-samples-operator-665b6dd947-m5b55\" (UID: \"78ccc2c7-2f1f-473e-b3a3-177458532c7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.198497 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.203085 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.203544 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rttwq\" (UniqueName: \"kubernetes.io/projected/bc753007-98cd-4b3b-ab70-6035482f7c5e-kube-api-access-rttwq\") pod \"console-f9d7485db-dcnm7\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.206516 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-84ffc"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.211933 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.216928 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-sqjq7" event={"ID":"1cb28b3e-a772-4541-a845-34fd991c6162","Type":"ContainerStarted","Data":"e8d9db731959a433dfeea73a35546cea11abee735f9d015e1ecef56a80893080"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.221258 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.228966 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" event={"ID":"a5ff2962-1c70-420c-9003-9339c14eca14","Type":"ContainerStarted","Data":"82370cd0d3b2bc0cb12918cf0430e7c537f1834667d01367b707b0d4da8d2f98"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.229011 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" event={"ID":"a5ff2962-1c70-420c-9003-9339c14eca14","Type":"ContainerStarted","Data":"c6c3682ad4c6ccead9c46b4767ee676725254ff66f21743d0663b3e92b8feab2"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.230116 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.232811 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" event={"ID":"771b2fec-5cc7-4ada-ae0b-c49346660f81","Type":"ContainerStarted","Data":"c2f28cbb753b9b21129dcc5be6a062cbbec4d8b4a8d777cb6afd7e8d4e4bce1d"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.234275 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" event={"ID":"666b4f49-0941-4575-a0e0-dce9819cc7c1","Type":"ContainerStarted","Data":"c96b78ec48079c22041b7601781fd5a4818cf71f050233a5f522a43f4fde2ebb"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.235918 4852 generic.go:334] "Generic (PLEG): container finished" podID="50913b72-962f-4911-9d29-128636768457" containerID="e7c2f50826343be5c07022ef6947a79353fe72ec5047c77783ee381b4ef4c451" exitCode=0 Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.235950 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" event={"ID":"50913b72-962f-4911-9d29-128636768457","Type":"ContainerDied","Data":"e7c2f50826343be5c07022ef6947a79353fe72ec5047c77783ee381b4ef4c451"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.235968 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" event={"ID":"50913b72-962f-4911-9d29-128636768457","Type":"ContainerStarted","Data":"8b44ebb89773f02bad70e476924545228be422f1009ad636555e30a544075922"} Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.242680 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.243339 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.74331774 +0000 UTC m=+142.960648944 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.243469 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-djxrn"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.250189 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ccvc\" (UniqueName: \"kubernetes.io/projected/bb8d8805-ce8c-4ce0-b669-c64e7aa85268-kube-api-access-5ccvc\") pod \"router-default-5444994796-tg9p8\" (UID: \"bb8d8805-ce8c-4ce0-b669-c64e7aa85268\") " pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.264494 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dj7lm\" (UniqueName: \"kubernetes.io/projected/5933f5b1-89f8-45a4-b9e7-e5375e9a44bd-kube-api-access-dj7lm\") pod \"machine-config-operator-74547568cd-2mfsp\" (UID: \"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.265521 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.275280 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h7cn5"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.278464 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.294058 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9xsj\" (UniqueName: \"kubernetes.io/projected/2bfe3527-e820-4844-82ed-1cec248a4b61-kube-api-access-s9xsj\") pod \"package-server-manager-789f6589d5-vjgg4\" (UID: \"2bfe3527-e820-4844-82ed-1cec248a4b61\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:05 crc kubenswrapper[4852]: W0129 10:44:05.305126 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9739b566_c925_4c9d_824f_94a8a5b9fb43.slice/crio-176070b826e4bbce70d59d622f6c6d66bb07ac1d3a72ddfc1a3571a55de125d9 WatchSource:0}: Error finding container 176070b826e4bbce70d59d622f6c6d66bb07ac1d3a72ddfc1a3571a55de125d9: Status 404 returned error can't find the container with id 176070b826e4bbce70d59d622f6c6d66bb07ac1d3a72ddfc1a3571a55de125d9 Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.305504 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvb26\" (UniqueName: \"kubernetes.io/projected/8af2675e-fbb0-4af6-96f2-f9f1bc5ce987-kube-api-access-bvb26\") pod \"machine-config-server-f5k22\" (UID: \"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987\") " pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:05 crc kubenswrapper[4852]: W0129 10:44:05.313977 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43ee8959_7d50_4e2c_93d8_70de85191fc3.slice/crio-03eb5a9f30c906e8c42833f4bce58c2d21b0ed1b6df378ce910432feeaf12063 WatchSource:0}: Error finding container 03eb5a9f30c906e8c42833f4bce58c2d21b0ed1b6df378ce910432feeaf12063: Status 404 returned error can't find the container with id 03eb5a9f30c906e8c42833f4bce58c2d21b0ed1b6df378ce910432feeaf12063 Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.323261 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnwd9\" (UniqueName: \"kubernetes.io/projected/e1a1fdcb-490c-4679-ac26-bb447bf8e47c-kube-api-access-wnwd9\") pod \"ingress-canary-rx688\" (UID: \"e1a1fdcb-490c-4679-ac26-bb447bf8e47c\") " pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.344741 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.345289 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.346083 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.846062307 +0000 UTC m=+143.063393431 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.350315 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.355999 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nj4w\" (UniqueName: \"kubernetes.io/projected/f1cc79d4-3d21-4aa4-a814-ed50648761f3-kube-api-access-5nj4w\") pod \"service-ca-9c57cc56f-27n6n\" (UID: \"f1cc79d4-3d21-4aa4-a814-ed50648761f3\") " pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.359956 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.366565 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2e8e7480-243c-44cf-9d11-9583242f5e1f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.373162 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.375656 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-f5k22" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.386459 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gkdj\" (UniqueName: \"kubernetes.io/projected/996c9132-4f08-47d2-ad63-9ac2c3196e95-kube-api-access-4gkdj\") pod \"multus-admission-controller-857f4d67dd-qsvjm\" (UID: \"996c9132-4f08-47d2-ad63-9ac2c3196e95\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.406128 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqrnk\" (UniqueName: \"kubernetes.io/projected/08e29585-8b41-4ac1-94f2-38a45107f4b9-kube-api-access-lqrnk\") pod \"collect-profiles-29494710-sztft\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.417114 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rx688" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.417974 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.427780 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfh82\" (UniqueName: \"kubernetes.io/projected/71960dd3-2d8d-42a9-9f60-ef3bd201f17e-kube-api-access-tfh82\") pod \"catalog-operator-68c6474976-svkbv\" (UID: \"71960dd3-2d8d-42a9-9f60-ef3bd201f17e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.432266 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.458537 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/21463f0e-ee0d-423e-915e-30895dab7f86-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9t2zf\" (UID: \"21463f0e-ee0d-423e-915e-30895dab7f86\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.459175 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.459479 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:05.959468268 +0000 UTC m=+143.176799402 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.488374 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpmq2\" (UniqueName: \"kubernetes.io/projected/b9830ebd-d938-4c32-ba4b-9f0ed06808a5-kube-api-access-xpmq2\") pod \"dns-default-956dv\" (UID: \"b9830ebd-d938-4c32-ba4b-9f0ed06808a5\") " pod="openshift-dns/dns-default-956dv" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.488784 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw52q\" (UniqueName: \"kubernetes.io/projected/243c7172-ae00-4728-94bf-8ecd9217abbf-kube-api-access-mw52q\") pod \"etcd-operator-b45778765-vkn85\" (UID: \"243c7172-ae00-4728-94bf-8ecd9217abbf\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.504140 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj4gr\" (UniqueName: \"kubernetes.io/projected/4845c49e-2ed9-4760-bdc8-6156ebcae154-kube-api-access-bj4gr\") pod \"migrator-59844c95c7-78gzm\" (UID: \"4845c49e-2ed9-4760-bdc8-6156ebcae154\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.523356 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skkjj\" (UniqueName: \"kubernetes.io/projected/53eb6e2d-e130-4ef0-8242-d429c1cf2be0-kube-api-access-skkjj\") pod \"control-plane-machine-set-operator-78cbb6b69f-99tmr\" (UID: \"53eb6e2d-e130-4ef0-8242-d429c1cf2be0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.526668 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.541837 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7xtp\" (UniqueName: \"kubernetes.io/projected/c34cf2f3-08b2-48bc-9413-f10ec68d8bd3-kube-api-access-x7xtp\") pod \"machine-config-controller-84d6567774-8qskw\" (UID: \"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.543905 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.559420 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.560079 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.560466 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.060451732 +0000 UTC m=+143.277782866 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.571846 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.571881 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gz49m"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.578621 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2k9q\" (UniqueName: \"kubernetes.io/projected/2e8e7480-243c-44cf-9d11-9583242f5e1f-kube-api-access-c2k9q\") pod \"ingress-operator-5b745b69d9-9slq2\" (UID: \"2e8e7480-243c-44cf-9d11-9583242f5e1f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.579931 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-p46mf"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.586074 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.592387 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7ktx\" (UniqueName: \"kubernetes.io/projected/4cde3fa3-1fde-45f6-891d-38f98485d443-kube-api-access-n7ktx\") pod \"marketplace-operator-79b997595-b2ghs\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.603790 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.617452 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.626990 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.632360 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfmmr\" (UniqueName: \"kubernetes.io/projected/db443f33-63b4-4ba5-b7a2-1578e241f449-kube-api-access-sfmmr\") pod \"csi-hostpathplugin-m8qlz\" (UID: \"db443f33-63b4-4ba5-b7a2-1578e241f449\") " pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.634342 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.636677 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a317d59-47f5-449f-b11b-02949dc5daf1-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7kjqx\" (UID: \"6a317d59-47f5-449f-b11b-02949dc5daf1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:05 crc kubenswrapper[4852]: W0129 10:44:05.638845 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8416e561_d08a_4708_bf09_e76b1934bdbd.slice/crio-4e3e9e87a6494b13214e98984b7c550d772ffa7c6f3a9e7a4d5fff8c3c9b03be WatchSource:0}: Error finding container 4e3e9e87a6494b13214e98984b7c550d772ffa7c6f3a9e7a4d5fff8c3c9b03be: Status 404 returned error can't find the container with id 4e3e9e87a6494b13214e98984b7c550d772ffa7c6f3a9e7a4d5fff8c3c9b03be Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.654031 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7984r\" (UniqueName: \"kubernetes.io/projected/fb797a24-aa56-4fff-b691-4c9adae18fc1-kube-api-access-7984r\") pod \"service-ca-operator-777779d784-4s6c7\" (UID: \"fb797a24-aa56-4fff-b691-4c9adae18fc1\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.661526 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.662311 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.662813 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.162627876 +0000 UTC m=+143.379959010 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.665324 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jbp5\" (UniqueName: \"kubernetes.io/projected/778ed389-22c2-4030-a586-635e936ef180-kube-api-access-6jbp5\") pod \"packageserver-d55dfcdfc-m8wts\" (UID: \"778ed389-22c2-4030-a586-635e936ef180\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.667946 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.685364 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6dcq\" (UniqueName: \"kubernetes.io/projected/016ce5cc-2121-40d8-8e66-0e6e416f64bd-kube-api-access-d6dcq\") pod \"olm-operator-6b444d44fb-8js8l\" (UID: \"016ce5cc-2121-40d8-8e66-0e6e416f64bd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.698135 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.708810 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-956dv" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.764708 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.765365 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.265348393 +0000 UTC m=+143.482679527 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.835282 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.870107 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.870550 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.370522951 +0000 UTC m=+143.587854155 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.874751 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.895958 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.909168 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.910749 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.919659 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zh2lg"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.928416 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-dcnm7"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.946982 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6"] Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.971688 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.971929 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:06.471888134 +0000 UTC m=+143.689219278 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:05 crc kubenswrapper[4852]: I0129 10:44:05.972028 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:05 crc kubenswrapper[4852]: E0129 10:44:05.972379 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.472363526 +0000 UTC m=+143.689694660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.073195 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.073472 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.573442382 +0000 UTC m=+143.790773526 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.073995 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.074356 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.574340184 +0000 UTC m=+143.791671318 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.126616 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1fe605f_3fd0_43de_8b26_7ed80ef57b34.slice/crio-0d8a7f2d16ebe0c59baa6790bab9e2d1fb6cc105739691d12e8052f3c98274bf WatchSource:0}: Error finding container 0d8a7f2d16ebe0c59baa6790bab9e2d1fb6cc105739691d12e8052f3c98274bf: Status 404 returned error can't find the container with id 0d8a7f2d16ebe0c59baa6790bab9e2d1fb6cc105739691d12e8052f3c98274bf Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.161653 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb75be5c_a8a8_469f_9725_4410fd41379d.slice/crio-c89e786ae465ec80334a343237c8516973fc4cb7224a8452c178cf8f48aa91d7 WatchSource:0}: Error finding container c89e786ae465ec80334a343237c8516973fc4cb7224a8452c178cf8f48aa91d7: Status 404 returned error can't find the container with id c89e786ae465ec80334a343237c8516973fc4cb7224a8452c178cf8f48aa91d7 Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.176213 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.176612 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:06.67659694 +0000 UTC m=+143.893928074 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.254163 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" event={"ID":"9739b566-c925-4c9d-824f-94a8a5b9fb43","Type":"ContainerStarted","Data":"62e874ace5f00e4bedfe0788977ea339813a1130ae039649939fe4bd8916af77"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.254210 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" event={"ID":"9739b566-c925-4c9d-824f-94a8a5b9fb43","Type":"ContainerStarted","Data":"176070b826e4bbce70d59d622f6c6d66bb07ac1d3a72ddfc1a3571a55de125d9"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.257462 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" event={"ID":"014973b5-c724-49a0-ab5d-2a1a80328f4e","Type":"ContainerStarted","Data":"5b6e0e69d9aba190a196d1180ff51a25bfccc3e2cc1d492351000d4bf61e5398"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.257521 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" event={"ID":"014973b5-c724-49a0-ab5d-2a1a80328f4e","Type":"ContainerStarted","Data":"9fb455f9e88f2bec3a6f08e12df9674a51c9354bbf8c81b2a5debc74843bdae7"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.259556 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.261325 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" event={"ID":"a56c210f-0186-49e4-b21c-bf46c22ab3dd","Type":"ContainerStarted","Data":"5ce0246087f288a7d1fffde37b8f4fd74867b0ff32cd670471f7fb7f15186275"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.279684 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" event={"ID":"4e5b1e5c-9af2-44be-8a29-6a597a5ea609","Type":"ContainerStarted","Data":"505e3fda66a24c2dc3a171bbe77fd524b181e89823898fb4d76c026ad2093bc5"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.280197 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.280527 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:06.780506816 +0000 UTC m=+143.997837950 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.281779 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" event={"ID":"43ee8959-7d50-4e2c-93d8-70de85191fc3","Type":"ContainerStarted","Data":"f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.281850 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.281865 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" event={"ID":"43ee8959-7d50-4e2c-93d8-70de85191fc3","Type":"ContainerStarted","Data":"03eb5a9f30c906e8c42833f4bce58c2d21b0ed1b6df378ce910432feeaf12063"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.285055 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" event={"ID":"771b2fec-5cc7-4ada-ae0b-c49346660f81","Type":"ContainerStarted","Data":"43e53fa9156d4ff72b4608b87b17f183e6079a112aae4963370b2074ab060085"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.286873 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" event={"ID":"666b4f49-0941-4575-a0e0-dce9819cc7c1","Type":"ContainerStarted","Data":"981340f854474e19cf50419bbf796cf6909e22a381db09ba52030895695b8ba0"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.287314 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.289531 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-sqjq7" event={"ID":"1cb28b3e-a772-4541-a845-34fd991c6162","Type":"ContainerStarted","Data":"6a8263affdf68d72e35c55fbc4e217090ddf46336f1a3713a3b9310075af1abf"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.289726 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.291230 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" event={"ID":"8416e561-d08a-4708-bf09-e76b1934bdbd","Type":"ContainerStarted","Data":"4e3e9e87a6494b13214e98984b7c550d772ffa7c6f3a9e7a4d5fff8c3c9b03be"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.298238 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" event={"ID":"c1fe605f-3fd0-43de-8b26-7ed80ef57b34","Type":"ContainerStarted","Data":"0d8a7f2d16ebe0c59baa6790bab9e2d1fb6cc105739691d12e8052f3c98274bf"} Jan 29 10:44:06 crc kubenswrapper[4852]: 
I0129 10:44:06.299406 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" event={"ID":"db75be5c-a8a8-469f-9725-4410fd41379d","Type":"ContainerStarted","Data":"c89e786ae465ec80334a343237c8516973fc4cb7224a8452c178cf8f48aa91d7"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.303013 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tg9p8" event={"ID":"bb8d8805-ce8c-4ce0-b669-c64e7aa85268","Type":"ContainerStarted","Data":"ea97245dfe383860e3f7fc66c8fd57648bf9a209c03cc513af0d7aa17fda1cf9"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.308959 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" event={"ID":"0b23e6b1-8250-4065-b064-f6bde7189794","Type":"ContainerStarted","Data":"f5a80d8a225e62e1b93ba224dcde5f9a3a9416c00b0ef97d27cc4ec04ecd774d"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.310683 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-f5k22" event={"ID":"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987","Type":"ContainerStarted","Data":"e740804e2ed139ec60504bfe2670f934bfc8e5620a28c9c86100004f8bdd97ae"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.314037 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-dcnm7" event={"ID":"bc753007-98cd-4b3b-ab70-6035482f7c5e","Type":"ContainerStarted","Data":"9a721f661ca10efb802fb4dc7e96ad1c66715c47e3d123937aa063edf6bed6f5"} Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.319498 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" podStartSLOduration=123.319480599 podStartE2EDuration="2m3.319480599s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:06.31913475 +0000 UTC m=+143.536465894" watchObservedRunningTime="2026-01-29 10:44:06.319480599 +0000 UTC m=+143.536811733" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.353678 4852 patch_prober.go:28] interesting pod/downloads-7954f5f757-sqjq7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.353737 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sqjq7" podUID="1cb28b3e-a772-4541-a845-34fd991c6162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.359160 4852 patch_prober.go:28] interesting pod/console-operator-58897d9998-nmrj8 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.359220 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" 
podUID="666b4f49-0941-4575-a0e0-dce9819cc7c1" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.359295 4852 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-h7cn5 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.359314 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" podUID="43ee8959-7d50-4e2c-93d8-70de85191fc3" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.383185 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.383451 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.883427748 +0000 UTC m=+144.100758932 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.383781 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.385203 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.885188781 +0000 UTC m=+144.102519915 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.437202 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.484726 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.484937 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.984909905 +0000 UTC m=+144.202241039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.485880 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.498693 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:06.998674205 +0000 UTC m=+144.216005329 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.600106 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.601405 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.101383961 +0000 UTC m=+144.318715095 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.601622 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.602113 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.102096538 +0000 UTC m=+144.319427672 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.634707 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.657362 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.664118 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-27n6n"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.690347 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rx688"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.696841 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.702374 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.703069 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.203027401 +0000 UTC m=+144.420358535 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.706697 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.711708 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.718136 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vkn85"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.719197 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf"] Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.785191 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode1a1fdcb_490c_4679_ac26_bb447bf8e47c.slice/crio-731ac0a632be9ca372329c041b2829bd9c309535c553a36cfbdab9c3c51265eb WatchSource:0}: Error finding container 731ac0a632be9ca372329c041b2829bd9c309535c553a36cfbdab9c3c51265eb: Status 404 returned error can't find the container with id 731ac0a632be9ca372329c041b2829bd9c309535c553a36cfbdab9c3c51265eb Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.812993 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.813428 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.313415757 +0000 UTC m=+144.530746891 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.853239 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.887000 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.892453 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv"] Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.894708 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm"] Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.896650 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e8e7480_243c_44cf_9d11_9583242f5e1f.slice/crio-a51ac70762ddd1e2cde64ab645573c093b1f854a7d56c30e80e5e89062b23ba2 WatchSource:0}: Error finding container a51ac70762ddd1e2cde64ab645573c093b1f854a7d56c30e80e5e89062b23ba2: Status 404 returned error can't find the container with id a51ac70762ddd1e2cde64ab645573c093b1f854a7d56c30e80e5e89062b23ba2 Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.920467 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:06 crc kubenswrapper[4852]: E0129 10:44:06.920922 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.420903322 +0000 UTC m=+144.638234456 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:06 crc kubenswrapper[4852]: I0129 10:44:06.923972 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft"] Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.964842 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4845c49e_2ed9_4760_bdc8_6156ebcae154.slice/crio-aaed33350c74dbefdc5af89416b63d3b9c0c2f76904d57a6cb05a4fa6a9ac6e6 WatchSource:0}: Error finding container aaed33350c74dbefdc5af89416b63d3b9c0c2f76904d57a6cb05a4fa6a9ac6e6: Status 404 returned error can't find the container with id aaed33350c74dbefdc5af89416b63d3b9c0c2f76904d57a6cb05a4fa6a9ac6e6 Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.965249 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb797a24_aa56_4fff_b691_4c9adae18fc1.slice/crio-9749771951f5503340f8f3c5f646aefa379893a2d3f1f38bcc9650ca019ac942 WatchSource:0}: Error finding container 9749771951f5503340f8f3c5f646aefa379893a2d3f1f38bcc9650ca019ac942: Status 404 returned error can't find the container with id 9749771951f5503340f8f3c5f646aefa379893a2d3f1f38bcc9650ca019ac942 Jan 29 10:44:06 crc kubenswrapper[4852]: W0129 10:44:06.990383 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08e29585_8b41_4ac1_94f2_38a45107f4b9.slice/crio-7dd0acd5da36bdd139bf85bdcd3bffeea92a588caeb72a96a10695a2a8f925bb WatchSource:0}: Error finding container 7dd0acd5da36bdd139bf85bdcd3bffeea92a588caeb72a96a10695a2a8f925bb: Status 404 returned error can't find the container with id 7dd0acd5da36bdd139bf85bdcd3bffeea92a588caeb72a96a10695a2a8f925bb Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.025882 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.027320 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.52730316 +0000 UTC m=+144.744634304 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.057240 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-956dv"] Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.096374 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b2ghs"] Jan 29 10:44:07 crc kubenswrapper[4852]: W0129 10:44:07.102326 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9830ebd_d938_4c32_ba4b_9f0ed06808a5.slice/crio-45c855ff1fd993f4cc68f37cd457aed1257b91289320aaee394830add941f0ee WatchSource:0}: Error finding container 45c855ff1fd993f4cc68f37cd457aed1257b91289320aaee394830add941f0ee: Status 404 returned error can't find the container with id 45c855ff1fd993f4cc68f37cd457aed1257b91289320aaee394830add941f0ee Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.106947 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw"] Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.112422 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx"] Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.126878 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.127098 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.627078414 +0000 UTC m=+144.844409558 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.128103 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.628090629 +0000 UTC m=+144.845421763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.128527 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.167331 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-m8qlz"] Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.171163 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts"] Jan 29 10:44:07 crc kubenswrapper[4852]: W0129 10:44:07.179874 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc34cf2f3_08b2_48bc_9413_f10ec68d8bd3.slice/crio-46bbf9d13c77d2f07dc72a5256d465d06c0170b0d30c1b7b2a4622d1a178fe51 WatchSource:0}: Error finding container 46bbf9d13c77d2f07dc72a5256d465d06c0170b0d30c1b7b2a4622d1a178fe51: Status 404 returned error can't find the container with id 46bbf9d13c77d2f07dc72a5256d465d06c0170b0d30c1b7b2a4622d1a178fe51 Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.193239 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qsvjm"] Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.199013 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l"] Jan 29 10:44:07 crc kubenswrapper[4852]: W0129 10:44:07.231093 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a317d59_47f5_449f_b11b_02949dc5daf1.slice/crio-55a0b7ce8e9a035641c04bec48cbfe82162c1ec8d799e0948b4dd5f9d8b61886 WatchSource:0}: Error finding container 55a0b7ce8e9a035641c04bec48cbfe82162c1ec8d799e0948b4dd5f9d8b61886: Status 404 returned error can't find the container with id 55a0b7ce8e9a035641c04bec48cbfe82162c1ec8d799e0948b4dd5f9d8b61886 Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.231543 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.231861 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.731845542 +0000 UTC m=+144.949176676 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: W0129 10:44:07.245899 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod778ed389_22c2_4030_a586_635e936ef180.slice/crio-e9bdc846c3cef0f1ddabf5a9d6da0c337193eaf5948bc570e6ddfc91fe9aecc9 WatchSource:0}: Error finding container e9bdc846c3cef0f1ddabf5a9d6da0c337193eaf5948bc570e6ddfc91fe9aecc9: Status 404 returned error can't find the container with id e9bdc846c3cef0f1ddabf5a9d6da0c337193eaf5948bc570e6ddfc91fe9aecc9 Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.254919 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" podStartSLOduration=124.254896312 podStartE2EDuration="2m4.254896312s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:07.25483085 +0000 UTC m=+144.472161984" watchObservedRunningTime="2026-01-29 10:44:07.254896312 +0000 UTC m=+144.472227446" Jan 29 10:44:07 crc kubenswrapper[4852]: W0129 10:44:07.267274 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod996c9132_4f08_47d2_ad63_9ac2c3196e95.slice/crio-002e6bca2dc77139884032bf539678bae652d719e5ae5521ec96c5f7cb4bd21e WatchSource:0}: Error finding container 002e6bca2dc77139884032bf539678bae652d719e5ae5521ec96c5f7cb4bd21e: Status 404 returned error can't find the container with id 002e6bca2dc77139884032bf539678bae652d719e5ae5521ec96c5f7cb4bd21e Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.289951 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-sqjq7" podStartSLOduration=124.289933477 podStartE2EDuration="2m4.289933477s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:07.287512737 +0000 UTC m=+144.504843871" watchObservedRunningTime="2026-01-29 10:44:07.289933477 +0000 UTC m=+144.507264611" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.321141 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" podStartSLOduration=124.321122967 podStartE2EDuration="2m4.321122967s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:07.320427589 +0000 UTC m=+144.537758723" watchObservedRunningTime="2026-01-29 10:44:07.321122967 +0000 UTC m=+144.538454101" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.339774 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.340230 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.840218759 +0000 UTC m=+145.057549893 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.392645 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" event={"ID":"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd","Type":"ContainerStarted","Data":"a4af3e32eb882a11cb548d8443d17c5e061d81ca6f4b64aadc11777f966d93bb"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.392703 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" event={"ID":"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd","Type":"ContainerStarted","Data":"b7cba775e485f7cf9ef35d97024607cf1583b571bfa3f5a766591e2ff6b8431d"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.399243 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" event={"ID":"778ed389-22c2-4030-a586-635e936ef180","Type":"ContainerStarted","Data":"e9bdc846c3cef0f1ddabf5a9d6da0c337193eaf5948bc570e6ddfc91fe9aecc9"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.407701 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" event={"ID":"4cde3fa3-1fde-45f6-891d-38f98485d443","Type":"ContainerStarted","Data":"9b2ec4e504a172577307d15166a84645e7d55e0817213d5b8e5306b2e2df571f"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.411927 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" event={"ID":"016ce5cc-2121-40d8-8e66-0e6e416f64bd","Type":"ContainerStarted","Data":"d1435a3491a9e764245b5bb86788b4d0d8ea92ff501be9f1421ec8c1dcf224f1"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.415036 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" event={"ID":"243c7172-ae00-4728-94bf-8ecd9217abbf","Type":"ContainerStarted","Data":"0a3bc612b4e8a9d5b50a8dceb55cf7923e01a5d5349f2320a3b4d342c15eec27"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.416169 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" event={"ID":"771b2fec-5cc7-4ada-ae0b-c49346660f81","Type":"ContainerStarted","Data":"69dec5b645815b5d01a7ca525add7e95414edf835bae78bfa45398a8a7999e60"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.418067 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" event={"ID":"2e8e7480-243c-44cf-9d11-9583242f5e1f","Type":"ContainerStarted","Data":"a51ac70762ddd1e2cde64ab645573c093b1f854a7d56c30e80e5e89062b23ba2"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.432183 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" event={"ID":"996c9132-4f08-47d2-ad63-9ac2c3196e95","Type":"ContainerStarted","Data":"002e6bca2dc77139884032bf539678bae652d719e5ae5521ec96c5f7cb4bd21e"} Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.442117 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.942088774 +0000 UTC m=+145.159419918 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.441868 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.443780 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.444347 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:07.94433216 +0000 UTC m=+145.161663354 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.446901 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" event={"ID":"db75be5c-a8a8-469f-9725-4410fd41379d","Type":"ContainerStarted","Data":"3c750bb2fa7c09b4c0e5b1ac1c4d03a03683bf86c23161c4fa74d699bcfb68d4"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.452003 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" event={"ID":"f1cc79d4-3d21-4aa4-a814-ed50648761f3","Type":"ContainerStarted","Data":"8a6168813c128bcb87493e1cf1d0386f5e419316af72b50c11d0581a722b376f"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.453866 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" event={"ID":"f1cc79d4-3d21-4aa4-a814-ed50648761f3","Type":"ContainerStarted","Data":"4e95a5648bbc79886da4a8ae0112dd0e2f60bb95c8df491834f77c1b63e18537"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.456491 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-djxrn" podStartSLOduration=124.45647373 podStartE2EDuration="2m4.45647373s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:07.363188546 +0000 UTC m=+144.580519690" watchObservedRunningTime="2026-01-29 10:44:07.45647373 +0000 UTC m=+144.673804864" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.460691 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" event={"ID":"4845c49e-2ed9-4760-bdc8-6156ebcae154","Type":"ContainerStarted","Data":"aaed33350c74dbefdc5af89416b63d3b9c0c2f76904d57a6cb05a4fa6a9ac6e6"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.491406 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" event={"ID":"08e29585-8b41-4ac1-94f2-38a45107f4b9","Type":"ContainerStarted","Data":"7dd0acd5da36bdd139bf85bdcd3bffeea92a588caeb72a96a10695a2a8f925bb"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.491443 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" event={"ID":"2bfe3527-e820-4844-82ed-1cec248a4b61","Type":"ContainerStarted","Data":"7b47e294268002cf5d7a40de69f560a6ee8e93687418cd2bfdd8472f361c1e85"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.491453 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" event={"ID":"2bfe3527-e820-4844-82ed-1cec248a4b61","Type":"ContainerStarted","Data":"ba879919810ac49aac4dc22aa6edad41af99f14b0d1dfc487b1832b8cb67dc98"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.494397 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" event={"ID":"53eb6e2d-e130-4ef0-8242-d429c1cf2be0","Type":"ContainerStarted","Data":"fc1620da055095594866c7854ed8a94e61a9059fc2218c550b161e0f3c836a9d"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.498942 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-f5k22" event={"ID":"8af2675e-fbb0-4af6-96f2-f9f1bc5ce987","Type":"ContainerStarted","Data":"01eb640c4109c843b280d8e2ccd35a44c7c2563f82d141badb6321738e9924e3"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.500787 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" event={"ID":"c1fe605f-3fd0-43de-8b26-7ed80ef57b34","Type":"ContainerStarted","Data":"33aa994d1429aad3f7a9fc9e755e3d8e1762f7991dc2471a1e7bfbd6c683f797"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.503004 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" event={"ID":"78ccc2c7-2f1f-473e-b3a3-177458532c7e","Type":"ContainerStarted","Data":"8eed5689096f6a3e5483a1cced03168a245d8eb0c8f00d480940d99b69eeab9c"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.532019 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" event={"ID":"8416e561-d08a-4708-bf09-e76b1934bdbd","Type":"ContainerStarted","Data":"283b2e180853691ae27fd2b80ded4a517d59664a824e357c158285a5147d2b14"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.547138 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.548049 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.048029581 +0000 UTC m=+145.265360715 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.561395 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" event={"ID":"014973b5-c724-49a0-ab5d-2a1a80328f4e","Type":"ContainerStarted","Data":"9bf61489c472b078742dabf635dbc173ff77a1c2e64746d7449069f6fc2e9547"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.584480 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" event={"ID":"92571915-fd75-46d5-ad65-3a61037a42de","Type":"ContainerStarted","Data":"7a5483b10e8718a3c9457a683f50fea3d77bfbc00eb40dd8276cde2deb998cdf"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.584812 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" event={"ID":"92571915-fd75-46d5-ad65-3a61037a42de","Type":"ContainerStarted","Data":"aca1f62a7f52df7f29e600308194caeccb43d6f3a8dadf3425389bade2c0d343"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.592346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" event={"ID":"a56c210f-0186-49e4-b21c-bf46c22ab3dd","Type":"ContainerStarted","Data":"a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.593255 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.597691 4852 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-gz49m container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" start-of-body= Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.597746 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" podUID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.607392 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" event={"ID":"db443f33-63b4-4ba5-b7a2-1578e241f449","Type":"ContainerStarted","Data":"90e189576b0acfaf8eed8ec2afd59c38ef05cd98a18f426bc9a238858597186e"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.649039 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.650274 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.150260786 +0000 UTC m=+145.367591920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.655494 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tg9p8" event={"ID":"bb8d8805-ce8c-4ce0-b669-c64e7aa85268","Type":"ContainerStarted","Data":"a64adad24553f7a0efe17f8442df30bc67a560d2af0e8bfd24ed61cbf4644a4a"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.683268 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" event={"ID":"6ca2c540-4ca7-4bac-aba9-c14d3da95319","Type":"ContainerStarted","Data":"8a96d49502463530d44badf29774a495a469746d63fd2daf1a34c838596501a7"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.693483 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-956dv" event={"ID":"b9830ebd-d938-4c32-ba4b-9f0ed06808a5","Type":"ContainerStarted","Data":"45c855ff1fd993f4cc68f37cd457aed1257b91289320aaee394830add941f0ee"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.696434 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" event={"ID":"fb797a24-aa56-4fff-b691-4c9adae18fc1","Type":"ContainerStarted","Data":"9749771951f5503340f8f3c5f646aefa379893a2d3f1f38bcc9650ca019ac942"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.701064 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" event={"ID":"8d098239-2446-415a-ae17-a49c5730ce99","Type":"ContainerStarted","Data":"b2ee2201cc7fb0015daa084324f34e7a937a4de51dfa3a5fec660da6bb9a050d"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.701106 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" event={"ID":"8d098239-2446-415a-ae17-a49c5730ce99","Type":"ContainerStarted","Data":"dbcf02645a868de3dfb4b7268e156d3546a0cd043841c0ef0cde53b8427a23a5"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.710130 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rx688" event={"ID":"e1a1fdcb-490c-4679-ac26-bb447bf8e47c","Type":"ContainerStarted","Data":"731ac0a632be9ca372329c041b2829bd9c309535c553a36cfbdab9c3c51265eb"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.718699 4852 generic.go:334] "Generic (PLEG): container finished" podID="4e5b1e5c-9af2-44be-8a29-6a597a5ea609" containerID="3801bacb57333ad441cfa033b52202de31b152504356cffc874e9a90833f08fb" exitCode=0 Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.718773 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" event={"ID":"4e5b1e5c-9af2-44be-8a29-6a597a5ea609","Type":"ContainerDied","Data":"3801bacb57333ad441cfa033b52202de31b152504356cffc874e9a90833f08fb"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.723914 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" event={"ID":"0b23e6b1-8250-4065-b064-f6bde7189794","Type":"ContainerStarted","Data":"4e09568b5226e946b38a5f1151f5574cd2ef4962eefe20527a3dc9211303ec95"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.761252 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.761897 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" event={"ID":"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3","Type":"ContainerStarted","Data":"46bbf9d13c77d2f07dc72a5256d465d06c0170b0d30c1b7b2a4622d1a178fe51"} Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.762728 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.262711513 +0000 UTC m=+145.480042647 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.773809 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" event={"ID":"21463f0e-ee0d-423e-915e-30895dab7f86","Type":"ContainerStarted","Data":"6b40f4a7d6f37fe20db4cc106293857fa47837f61b7143646deb5c68a9155eda"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.792992 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" event={"ID":"6a317d59-47f5-449f-b11b-02949dc5daf1","Type":"ContainerStarted","Data":"55a0b7ce8e9a035641c04bec48cbfe82162c1ec8d799e0948b4dd5f9d8b61886"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.803641 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" event={"ID":"50913b72-962f-4911-9d29-128636768457","Type":"ContainerStarted","Data":"16e0c30e3884a1ba81c8df5c367f43c3311b78084cef17c534aa1c0876060a65"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.803694 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" event={"ID":"50913b72-962f-4911-9d29-128636768457","Type":"ContainerStarted","Data":"0642178c417a24daf9df25288bc4dde9b40ed04410e008059a49dfed64ebb475"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.845809 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-dcnm7" event={"ID":"bc753007-98cd-4b3b-ab70-6035482f7c5e","Type":"ContainerStarted","Data":"8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.855933 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" event={"ID":"71960dd3-2d8d-42a9-9f60-ef3bd201f17e","Type":"ContainerStarted","Data":"18d04e3f5d064529e4f72415e94597bae1954807e025581d548257d5697c9492"} Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.856624 4852 patch_prober.go:28] interesting pod/downloads-7954f5f757-sqjq7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.856652 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sqjq7" podUID="1cb28b3e-a772-4541-a845-34fd991c6162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.857363 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.862904 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.864508 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.364490257 +0000 UTC m=+145.581821391 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.875499 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.875748 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-nmrj8" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.875816 4852 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-svkbv container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body= Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.875846 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" podUID="71960dd3-2d8d-42a9-9f60-ef3bd201f17e" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.36:8443/healthz\": dial tcp 10.217.0.36:8443: connect: connection refused" Jan 29 10:44:07 crc kubenswrapper[4852]: I0129 10:44:07.964053 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:07 crc kubenswrapper[4852]: E0129 10:44:07.971828 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.471790087 +0000 UTC m=+145.689121231 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.078401 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.079037 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.579025825 +0000 UTC m=+145.796356959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.180020 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.180400 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.680385429 +0000 UTC m=+145.897716563 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.211020 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" podStartSLOduration=125.211004635 podStartE2EDuration="2m5.211004635s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.21000444 +0000 UTC m=+145.427335574" watchObservedRunningTime="2026-01-29 10:44:08.211004635 +0000 UTC m=+145.428335769" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.267238 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.282089 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:08 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:08 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:08 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.282148 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.283231 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.283540 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.783528256 +0000 UTC m=+146.000859400 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.290400 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-rx688" podStartSLOduration=6.290384865 podStartE2EDuration="6.290384865s" podCreationTimestamp="2026-01-29 10:44:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.289796921 +0000 UTC m=+145.507128055" watchObservedRunningTime="2026-01-29 10:44:08.290384865 +0000 UTC m=+145.507715999" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.371058 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-f5k22" podStartSLOduration=6.371039217 podStartE2EDuration="6.371039217s" podCreationTimestamp="2026-01-29 10:44:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.316311516 +0000 UTC m=+145.533642640" watchObservedRunningTime="2026-01-29 10:44:08.371039217 +0000 UTC m=+145.588370351" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.391145 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.391512 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:08.891492602 +0000 UTC m=+146.108823736 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.407095 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-tg9p8" podStartSLOduration=125.407078678 podStartE2EDuration="2m5.407078678s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.404863343 +0000 UTC m=+145.622194477" watchObservedRunningTime="2026-01-29 10:44:08.407078678 +0000 UTC m=+145.624409812" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.500213 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.500844 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.000829852 +0000 UTC m=+146.218160986 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.525693 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" podStartSLOduration=125.525678226 podStartE2EDuration="2m5.525678226s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.524959719 +0000 UTC m=+145.742290853" watchObservedRunningTime="2026-01-29 10:44:08.525678226 +0000 UTC m=+145.743009360" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.526545 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-r2p84" podStartSLOduration=125.526537528 podStartE2EDuration="2m5.526537528s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.438106804 +0000 UTC m=+145.655437938" watchObservedRunningTime="2026-01-29 10:44:08.526537528 +0000 UTC m=+145.743868652" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.578636 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" podStartSLOduration=125.578616124 podStartE2EDuration="2m5.578616124s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.577906477 +0000 UTC m=+145.795237611" watchObservedRunningTime="2026-01-29 10:44:08.578616124 +0000 UTC m=+145.795947248" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.601254 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.601413 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.101385986 +0000 UTC m=+146.318717120 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.601606 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.601874 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.101862858 +0000 UTC m=+146.319193982 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.642920 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v8cv7" podStartSLOduration=125.642887381 podStartE2EDuration="2m5.642887381s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.642744608 +0000 UTC m=+145.860075742" watchObservedRunningTime="2026-01-29 10:44:08.642887381 +0000 UTC m=+145.860218515" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.702683 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.702824 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.202789861 +0000 UTC m=+146.420120995 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.703319 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.703781 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.203769704 +0000 UTC m=+146.421100838 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.783413 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.783492 4852 csr.go:261] certificate signing request csr-mk4xj is approved, waiting to be issued Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.783700 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.784185 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-dcnm7" podStartSLOduration=125.784165461 podStartE2EDuration="2m5.784165461s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.749418812 +0000 UTC m=+145.966749966" watchObservedRunningTime="2026-01-29 10:44:08.784165461 +0000 UTC m=+146.001496585" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.785686 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-27n6n" podStartSLOduration=125.785680818 podStartE2EDuration="2m5.785680818s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.783806742 +0000 UTC m=+146.001137876" watchObservedRunningTime="2026-01-29 10:44:08.785680818 +0000 UTC m=+146.003011952" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.804953 4852 csr.go:257] certificate signing request csr-mk4xj is issued Jan 29 10:44:08 crc 
kubenswrapper[4852]: I0129 10:44:08.808988 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.809355 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.309339972 +0000 UTC m=+146.526671106 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.837178 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pqjnd" podStartSLOduration=125.83716278 podStartE2EDuration="2m5.83716278s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.835400525 +0000 UTC m=+146.052731659" watchObservedRunningTime="2026-01-29 10:44:08.83716278 +0000 UTC m=+146.054493914" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.915636 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p8nnl" podStartSLOduration=125.915617857 podStartE2EDuration="2m5.915617857s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.914378896 +0000 UTC m=+146.131710020" watchObservedRunningTime="2026-01-29 10:44:08.915617857 +0000 UTC m=+146.132948991" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.916254 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.916412 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vmqq6" podStartSLOduration=125.916405606 podStartE2EDuration="2m5.916405606s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.89065008 +0000 UTC m=+146.107981224" watchObservedRunningTime="2026-01-29 10:44:08.916405606 +0000 UTC 
m=+146.133736740" Jan 29 10:44:08 crc kubenswrapper[4852]: E0129 10:44:08.916552 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.416538319 +0000 UTC m=+146.633869463 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:08 crc kubenswrapper[4852]: I0129 10:44:08.930191 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-99tmr" event={"ID":"53eb6e2d-e130-4ef0-8242-d429c1cf2be0","Type":"ContainerStarted","Data":"6e86efdedda83ba8e31c8e170c46879dcbce07715098909756124cb91b0a6cc6"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.012465 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" podStartSLOduration=126.012442818 podStartE2EDuration="2m6.012442818s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:08.992633509 +0000 UTC m=+146.209964653" watchObservedRunningTime="2026-01-29 10:44:09.012442818 +0000 UTC m=+146.229773952" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.034805 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.035827 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.535811106 +0000 UTC m=+146.753142240 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.038380 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-84ffc" podStartSLOduration=126.038361528 podStartE2EDuration="2m6.038361528s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.03680314 +0000 UTC m=+146.254134274" watchObservedRunningTime="2026-01-29 10:44:09.038361528 +0000 UTC m=+146.255692662" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.046000 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" event={"ID":"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3","Type":"ContainerStarted","Data":"8a0fc6c40b1ad0e666f9306467334475d4f845e88685ce47e26c7eade4d1a9b2"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.083117 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" event={"ID":"016ce5cc-2121-40d8-8e66-0e6e416f64bd","Type":"ContainerStarted","Data":"3f17ac36439ac3d87231d4f114c190bf3c5edaa1d6f500fdb23014aec863dd0a"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.084112 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.093928 4852 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8js8l container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.093976 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" podUID="016ce5cc-2121-40d8-8e66-0e6e416f64bd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.099924 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" event={"ID":"08e29585-8b41-4ac1-94f2-38a45107f4b9","Type":"ContainerStarted","Data":"89b9c3b22ac43d6002d1a19dc8d2a4c2be4866f79255e4463dc428c0c9c0ecd1"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.114927 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" event={"ID":"2bfe3527-e820-4844-82ed-1cec248a4b61","Type":"ContainerStarted","Data":"f8084ced77323f99176b2f036717642c7d202dcf68b1f6de21eb382545485ae1"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.115038 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.138334 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-f2qqd" podStartSLOduration=126.138315947 podStartE2EDuration="2m6.138315947s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.093701935 +0000 UTC m=+146.311033069" watchObservedRunningTime="2026-01-29 10:44:09.138315947 +0000 UTC m=+146.355647081" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.141564 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.142422 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" event={"ID":"6a317d59-47f5-449f-b11b-02949dc5daf1","Type":"ContainerStarted","Data":"ba43fbb0f11c6489531fb379589d1f8de2cc88395a5207455a228be4d489a0b2"} Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.145135 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.645121655 +0000 UTC m=+146.862452789 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.166243 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" podStartSLOduration=126.166223627 podStartE2EDuration="2m6.166223627s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.140817419 +0000 UTC m=+146.358148553" watchObservedRunningTime="2026-01-29 10:44:09.166223627 +0000 UTC m=+146.383554761" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.167238 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" podStartSLOduration=126.167231711 podStartE2EDuration="2m6.167231711s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.161561681 +0000 UTC m=+146.378892815" watchObservedRunningTime="2026-01-29 10:44:09.167231711 +0000 UTC m=+146.384562835" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.199915 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" event={"ID":"71960dd3-2d8d-42a9-9f60-ef3bd201f17e","Type":"ContainerStarted","Data":"7c18adb811b1ac852326e5d32d6b58386367f970fa7a41a5f6f0a72c71f603c4"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.234202 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-svkbv" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.251060 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" podStartSLOduration=126.251036421 podStartE2EDuration="2m6.251036421s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.21371372 +0000 UTC m=+146.431044864" watchObservedRunningTime="2026-01-29 10:44:09.251036421 +0000 UTC m=+146.468367555" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.251636 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.252510 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:09.752496067 +0000 UTC m=+146.969827201 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.256050 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" event={"ID":"243c7172-ae00-4728-94bf-8ecd9217abbf","Type":"ContainerStarted","Data":"06f3c899ce68b5ca99355a8c31b3565a76e8d4fac5664bb2a870273e6d64e2ff"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.273370 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-956dv" event={"ID":"b9830ebd-d938-4c32-ba4b-9f0ed06808a5","Type":"ContainerStarted","Data":"681eacc5577ade5b4b7456def03788d76119e29fedec744da1250457663c88c4"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.273513 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:09 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:09 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:09 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.273540 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.280030 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" event={"ID":"fb797a24-aa56-4fff-b691-4c9adae18fc1","Type":"ContainerStarted","Data":"ff07e580d5a2948e8e8dfeeafc3b395cce26de5c8245b37bde0d5df28d4e5dfa"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.288886 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7kjqx" podStartSLOduration=126.288845984 podStartE2EDuration="2m6.288845984s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.250809415 +0000 UTC m=+146.468140549" watchObservedRunningTime="2026-01-29 10:44:09.288845984 +0000 UTC m=+146.506177118" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.298100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" event={"ID":"4e5b1e5c-9af2-44be-8a29-6a597a5ea609","Type":"ContainerStarted","Data":"3b08995c08920891fd9d46802559f0f2f5aa62428a8b2e30bcda8348bedc9cd4"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.298626 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 
10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.299914 4852 generic.go:334] "Generic (PLEG): container finished" podID="6ca2c540-4ca7-4bac-aba9-c14d3da95319" containerID="d320a283f97b45bbedf8c2d77292c449a3dd752d518b54b3b665979e14e9b7bd" exitCode=0 Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.299951 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" event={"ID":"6ca2c540-4ca7-4bac-aba9-c14d3da95319","Type":"ContainerDied","Data":"d320a283f97b45bbedf8c2d77292c449a3dd752d518b54b3b665979e14e9b7bd"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.305732 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" event={"ID":"78ccc2c7-2f1f-473e-b3a3-177458532c7e","Type":"ContainerStarted","Data":"1ada38ef5f97bf25237710b782fbd107745b784f1637e79a942e28d1f482a547"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.305776 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" event={"ID":"78ccc2c7-2f1f-473e-b3a3-177458532c7e","Type":"ContainerStarted","Data":"4bddca6fe92ef08d7ee07b5f73464ffa65849925abbab34432156146b04740b7"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.307463 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" event={"ID":"21463f0e-ee0d-423e-915e-30895dab7f86","Type":"ContainerStarted","Data":"29f6ecdecf1b08a167fa6a96bd1e1cac6a9f9f0ad99a7c9fccc9d0721786c45a"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.308966 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" event={"ID":"2e8e7480-243c-44cf-9d11-9583242f5e1f","Type":"ContainerStarted","Data":"6ffc922054c98561f1f9084a7ab81d79d47229e1592863e851a29948e0afb341"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.308986 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" event={"ID":"2e8e7480-243c-44cf-9d11-9583242f5e1f","Type":"ContainerStarted","Data":"2f1a4e081db8a8c1600c6e04654c6d279099579f462d88e44c528891f09e8db9"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.322900 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" event={"ID":"778ed389-22c2-4030-a586-635e936ef180","Type":"ContainerStarted","Data":"93ee9ca609a942f0721fc32c35436bc2b5ecd9f5a49448cd54e67c9a99547c4f"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.323738 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.328722 4852 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-m8wts container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" start-of-body= Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.328777 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" podUID="778ed389-22c2-4030-a586-635e936ef180" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.20:5443/healthz\": 
dial tcp 10.217.0.20:5443: connect: connection refused" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.347460 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4s6c7" podStartSLOduration=126.347445342 podStartE2EDuration="2m6.347445342s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.345293279 +0000 UTC m=+146.562624423" watchObservedRunningTime="2026-01-29 10:44:09.347445342 +0000 UTC m=+146.564776476" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.353206 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.355548 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.855530822 +0000 UTC m=+147.072861956 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.361901 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" event={"ID":"4cde3fa3-1fde-45f6-891d-38f98485d443","Type":"ContainerStarted","Data":"094822b659fe8774a41c75d4bea31fcd35e0399b8a18671044178bb751e0c890"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.363043 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.370900 4852 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-b2ghs container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.370948 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.388664 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-vkn85" podStartSLOduration=126.38864943 podStartE2EDuration="2m6.38864943s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.378213692 +0000 UTC m=+146.595544826" watchObservedRunningTime="2026-01-29 10:44:09.38864943 +0000 UTC m=+146.605980564" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.390665 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rx688" event={"ID":"e1a1fdcb-490c-4679-ac26-bb447bf8e47c","Type":"ContainerStarted","Data":"35a1a54f04547759153f0d4d26674c168b482b5605359d81620ed6470554f49c"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.421457 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" podStartSLOduration=126.42143997 podStartE2EDuration="2m6.42143997s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.421147992 +0000 UTC m=+146.638479126" watchObservedRunningTime="2026-01-29 10:44:09.42143997 +0000 UTC m=+146.638771104" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.431617 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" event={"ID":"c1fe605f-3fd0-43de-8b26-7ed80ef57b34","Type":"ContainerStarted","Data":"7df91bf5165e991a8773a4189aa309057deab60f23c85a2b9d233bb4655a90e2"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.458841 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.458989 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:09.958966397 +0000 UTC m=+147.176297531 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.459467 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.466231 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:09.966217425 +0000 UTC m=+147.183548559 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.506739 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" event={"ID":"996c9132-4f08-47d2-ad63-9ac2c3196e95","Type":"ContainerStarted","Data":"a73e11357ebe4927a24f75dea472a9e8668ec43badd241598bcef272490d17d4"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.506781 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" event={"ID":"4845c49e-2ed9-4760-bdc8-6156ebcae154","Type":"ContainerStarted","Data":"23d285cdbc290027b8d3eb021759b72ab9ac339a21d999d6f2d92ad1b4b028e5"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.506795 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" event={"ID":"4845c49e-2ed9-4760-bdc8-6156ebcae154","Type":"ContainerStarted","Data":"8e80feae2513f1313d2ece4846fb068e10bed755febe937244256ba076422bf6"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.506807 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" event={"ID":"5933f5b1-89f8-45a4-b9e7-e5375e9a44bd","Type":"ContainerStarted","Data":"0dabc387fcfbc91a6ef9ff5e5c3bf73f8fc399305a092de99a6a32d2342cf360"} Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.530856 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m5b55" podStartSLOduration=126.530838131 podStartE2EDuration="2m6.530838131s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.502727937 +0000 UTC m=+146.720059071" watchObservedRunningTime="2026-01-29 10:44:09.530838131 +0000 UTC m=+146.748169265" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.563091 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.567394 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.067370573 +0000 UTC m=+147.284701707 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.568087 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" podStartSLOduration=126.568072861 podStartE2EDuration="2m6.568072861s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.566180034 +0000 UTC m=+146.783511168" watchObservedRunningTime="2026-01-29 10:44:09.568072861 +0000 UTC m=+146.785403995" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.569096 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9t2zf" podStartSLOduration=126.569090956 podStartE2EDuration="2m6.569090956s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.530944834 +0000 UTC m=+146.748275968" watchObservedRunningTime="2026-01-29 10:44:09.569090956 +0000 UTC m=+146.786422090" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.636568 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9slq2" podStartSLOduration=126.636548093 podStartE2EDuration="2m6.636548093s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.603326212 +0000 UTC m=+146.820657346" watchObservedRunningTime="2026-01-29 10:44:09.636548093 +0000 UTC m=+146.853879227" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.637015 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" podStartSLOduration=126.637011014 podStartE2EDuration="2m6.637011014s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.636114991 +0000 UTC m=+146.853446125" watchObservedRunningTime="2026-01-29 10:44:09.637011014 +0000 UTC m=+146.854342148" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.667173 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.735069 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:10.235053955 +0000 UTC m=+147.452385089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.771127 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.771442 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.271426763 +0000 UTC m=+147.488757897 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.798169 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-78gzm" podStartSLOduration=126.798150293 podStartE2EDuration="2m6.798150293s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.741599437 +0000 UTC m=+146.958930571" watchObservedRunningTime="2026-01-29 10:44:09.798150293 +0000 UTC m=+147.015481427" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.818374 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-29 10:39:08 +0000 UTC, rotation deadline is 2026-10-19 15:26:55.106707089 +0000 UTC Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.818439 4852 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6316h42m45.288271444s for next certificate rotation Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.877233 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.877725 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:10.377713198 +0000 UTC m=+147.595044332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.977979 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:09 crc kubenswrapper[4852]: E0129 10:44:09.978308 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.478293853 +0000 UTC m=+147.695624987 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.980660 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-2mfsp" podStartSLOduration=126.98064729 podStartE2EDuration="2m6.98064729s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.980120048 +0000 UTC m=+147.197451182" watchObservedRunningTime="2026-01-29 10:44:09.98064729 +0000 UTC m=+147.197978424" Jan 29 10:44:09 crc kubenswrapper[4852]: I0129 10:44:09.981552 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-zh2lg" podStartSLOduration=126.981547413 podStartE2EDuration="2m6.981547413s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:09.799609749 +0000 UTC m=+147.016940903" watchObservedRunningTime="2026-01-29 10:44:09.981547413 +0000 UTC m=+147.198878547" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.001611 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.079719 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: 
\"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.080056 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.580031275 +0000 UTC m=+147.797362409 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.180984 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.181163 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.681130332 +0000 UTC m=+147.898461476 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.181259 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.181541 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.681533192 +0000 UTC m=+147.898864336 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.274267 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:10 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:10 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:10 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.274344 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.282850 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.283015 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.782985837 +0000 UTC m=+148.000316981 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.283226 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.283622 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.783609503 +0000 UTC m=+148.000940637 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.384381 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.384608 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.884561366 +0000 UTC m=+148.101892500 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.384845 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.385172 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.885158081 +0000 UTC m=+148.102489225 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.486251 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.486439 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.986412772 +0000 UTC m=+148.203743906 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.486640 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.486968 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:10.986959105 +0000 UTC m=+148.204290239 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.518892 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" event={"ID":"db443f33-63b4-4ba5-b7a2-1578e241f449","Type":"ContainerStarted","Data":"083f6d30b7d5f40cb01a710e88e6c1e103e89866960753a4f0c1125fc89f49a6"} Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.523388 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-956dv" event={"ID":"b9830ebd-d938-4c32-ba4b-9f0ed06808a5","Type":"ContainerStarted","Data":"e76cdd2f1e2bc2216189e11c39be7f303ba17eeb11d21f026500f702d4e1cf64"} Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.523545 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-956dv" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.534302 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" event={"ID":"996c9132-4f08-47d2-ad63-9ac2c3196e95","Type":"ContainerStarted","Data":"81862a5af91b975fe9dd1696f91e9dcb5671730432a2beda0cfa505dc524e6fc"} Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.541189 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" event={"ID":"6ca2c540-4ca7-4bac-aba9-c14d3da95319","Type":"ContainerStarted","Data":"04a4ef0e16b32a1f5dcb9357385b5cdffd07d2b16ea4cf322f651c1c270d8489"} Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.548204 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" event={"ID":"c34cf2f3-08b2-48bc-9413-f10ec68d8bd3","Type":"ContainerStarted","Data":"16b50fe290751f78a867b90787c7a2c22c71dbdba9b661064a4491fa350815ab"} Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.549367 4852 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-b2ghs container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.549418 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.550348 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-956dv" podStartSLOduration=8.55033248 podStartE2EDuration="8.55033248s" podCreationTimestamp="2026-01-29 10:44:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:10.548730861 +0000 UTC m=+147.766062005" watchObservedRunningTime="2026-01-29 
10:44:10.55033248 +0000 UTC m=+147.767663614" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.578214 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsvjm" podStartSLOduration=127.578198428 podStartE2EDuration="2m7.578198428s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:10.57661871 +0000 UTC m=+147.793949854" watchObservedRunningTime="2026-01-29 10:44:10.578198428 +0000 UTC m=+147.795529562" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.588697 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.589084 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.089063347 +0000 UTC m=+148.306394481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.624177 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-m8wts" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.624362 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8js8l" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.641531 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" podStartSLOduration=127.641515832 podStartE2EDuration="2m7.641515832s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:10.640456957 +0000 UTC m=+147.857788091" watchObservedRunningTime="2026-01-29 10:44:10.641515832 +0000 UTC m=+147.858846966" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.694165 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.697360 4852 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.197341291 +0000 UTC m=+148.414672515 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.783059 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8qskw" podStartSLOduration=127.783039887 podStartE2EDuration="2m7.783039887s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:10.737205086 +0000 UTC m=+147.954536220" watchObservedRunningTime="2026-01-29 10:44:10.783039887 +0000 UTC m=+148.000371021" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.797899 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.798141 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.298093479 +0000 UTC m=+148.515424623 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.798448 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.798827 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.298816878 +0000 UTC m=+148.516148022 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.899899 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.900117 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.400087218 +0000 UTC m=+148.617418352 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.900224 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:10 crc kubenswrapper[4852]: E0129 10:44:10.900623 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.400605961 +0000 UTC m=+148.617937105 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.931189 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-znvks"] Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.932752 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.935120 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 10:44:10 crc kubenswrapper[4852]: I0129 10:44:10.940690 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znvks"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.001384 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.001614 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.501569265 +0000 UTC m=+148.718900399 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.001795 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.002125 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.502111718 +0000 UTC m=+148.719442852 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.102566 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.102754 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.602715703 +0000 UTC m=+148.820046837 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.102809 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.103120 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-catalog-content\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.103129 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.603115933 +0000 UTC m=+148.820447057 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.103162 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqc9l\" (UniqueName: \"kubernetes.io/projected/28b1ffef-b64a-4b00-a08f-60535f0fef60-kube-api-access-zqc9l\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.103196 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-utilities\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.104752 4852 patch_prober.go:28] interesting pod/apiserver-76f77b778f-cbgpk container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]log ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]etcd ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/generic-apiserver-start-informers ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/max-in-flight-filter ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 29 10:44:11 crc kubenswrapper[4852]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [-]poststarthook/project.openshift.io-projectcache failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 29 10:44:11 crc kubenswrapper[4852]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [-]poststarthook/openshift.io-restmapperupdater failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 29 10:44:11 crc kubenswrapper[4852]: livez check failed Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.105015 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" podUID="50913b72-962f-4911-9d29-128636768457" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.123100 4852 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-p46mf container/openshift-config-operator 
namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.123153 4852 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-p46mf container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.123157 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" podUID="4e5b1e5c-9af2-44be-8a29-6a597a5ea609" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.123212 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" podUID="4e5b1e5c-9af2-44be-8a29-6a597a5ea609" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.124162 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fbk7g"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.125070 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.166952 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fbk7g"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.180155 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.204494 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.204648 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.70462273 +0000 UTC m=+148.921953864 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.204696 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.204787 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m2dn\" (UniqueName: \"kubernetes.io/projected/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-kube-api-access-4m2dn\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.204840 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-catalog-content\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.204943 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-utilities\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.204983 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.704971679 +0000 UTC m=+148.922302813 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.205067 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-catalog-content\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.205110 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqc9l\" (UniqueName: \"kubernetes.io/projected/28b1ffef-b64a-4b00-a08f-60535f0fef60-kube-api-access-zqc9l\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.205145 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-utilities\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.206019 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-catalog-content\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.206078 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-utilities\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.242032 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqc9l\" (UniqueName: \"kubernetes.io/projected/28b1ffef-b64a-4b00-a08f-60535f0fef60-kube-api-access-zqc9l\") pod \"community-operators-znvks\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.250850 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.280261 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:11 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:11 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.280324 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.306208 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.306454 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.806427954 +0000 UTC m=+149.023759118 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.306499 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m2dn\" (UniqueName: \"kubernetes.io/projected/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-kube-api-access-4m2dn\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.306575 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-catalog-content\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.306686 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-utilities\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.307210 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-utilities\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.307695 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-catalog-content\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.338128 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-84t6n"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.339020 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.355650 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m2dn\" (UniqueName: \"kubernetes.io/projected/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-kube-api-access-4m2dn\") pod \"certified-operators-fbk7g\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.380423 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-84t6n"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.407799 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.407849 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.408186 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:11.908175557 +0000 UTC m=+149.125506681 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.409318 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.438922 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509274 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509513 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpcpp\" (UniqueName: \"kubernetes.io/projected/d433f87b-087a-47e4-93e2-0754097e5b1b-kube-api-access-qpcpp\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509563 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509607 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-utilities\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509650 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-catalog-content\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509680 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " 
pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.509705 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.514094 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.013839706 +0000 UTC m=+149.231170860 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.515485 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.516112 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c75mb"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.517832 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.519249 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.522027 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.527458 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.532419 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c75mb"] Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.580072 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" event={"ID":"db443f33-63b4-4ba5-b7a2-1578e241f449","Type":"ContainerStarted","Data":"325e7ca97b155098f6ecb82c2a46ee2449a4dff1b003295533a0b7dbc2f5934a"} Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.591288 4852 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-b2ghs container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.591322 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616638 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-utilities\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616693 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r4vs\" (UniqueName: \"kubernetes.io/projected/b545ab65-c670-46ea-9f48-518113c3387e-kube-api-access-8r4vs\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616752 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-catalog-content\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616799 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-utilities\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616833 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpcpp\" (UniqueName: \"kubernetes.io/projected/d433f87b-087a-47e4-93e2-0754097e5b1b-kube-api-access-qpcpp\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616871 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.616898 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-catalog-content\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.617302 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-utilities\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.617366 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-catalog-content\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.617466 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.117453105 +0000 UTC m=+149.334784349 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.644109 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpcpp\" (UniqueName: \"kubernetes.io/projected/d433f87b-087a-47e4-93e2-0754097e5b1b-kube-api-access-qpcpp\") pod \"community-operators-84t6n\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.682921 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.721063 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.722187 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-utilities\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.722325 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-catalog-content\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.722469 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r4vs\" (UniqueName: \"kubernetes.io/projected/b545ab65-c670-46ea-9f48-518113c3387e-kube-api-access-8r4vs\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.723389 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.223365511 +0000 UTC m=+149.440696645 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.725244 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-utilities\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.730271 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-catalog-content\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.776625 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r4vs\" (UniqueName: \"kubernetes.io/projected/b545ab65-c670-46ea-9f48-518113c3387e-kube-api-access-8r4vs\") pod \"certified-operators-c75mb\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.792927 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.796729 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.823108 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.823397 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.323386972 +0000 UTC m=+149.540718106 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.852416 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.872886 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.924713 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:11 crc kubenswrapper[4852]: E0129 10:44:11.925116 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.425101284 +0000 UTC m=+149.642432418 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:11 crc kubenswrapper[4852]: I0129 10:44:11.976730 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znvks"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.032986 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.034246 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.534232289 +0000 UTC m=+149.751563423 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.128413 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.129150 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.132844 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.133006 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.133365 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.133615 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.133679 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.633665615 +0000 UTC m=+149.850996749 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.167893 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-p46mf" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.260980 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.261385 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.261485 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.279028 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.779011325 +0000 UTC m=+149.996342459 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.283089 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fbk7g"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.285863 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:12 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:12 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:12 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.285925 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.348633 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-84t6n"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.362808 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.363001 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.363062 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.363119 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.363178 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 10:44:12.863162873 +0000 UTC m=+150.080494007 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.392155 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.463946 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.464726 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:12.964711531 +0000 UTC m=+150.182042665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.485281 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.539211 4852 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.566056 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.566199 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:13.066156656 +0000 UTC m=+150.283487790 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.566320 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.566627 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 10:44:13.066615627 +0000 UTC m=+150.283946761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tf7qq" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.621332 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" event={"ID":"db443f33-63b4-4ba5-b7a2-1578e241f449","Type":"ContainerStarted","Data":"3bb0400ee37b9953c070b305959cf88fb12480d849d8a801cfa5e08deae0ec68"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.623848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"1f2e4ca2c6a975f776141cc212402d74735396cf3fc595c3775980ad2c22646d"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.644883 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"30ac610463b61e171c7993b5b97294f848a3a8cf7ee0ba7a0624e8863d59d556"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.658041 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerStarted","Data":"8b075aa92e5a4b6e31f9702e5285dcf20a48849d5df65e54651f6b98c5dcfb68"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.659352 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"19adafafd843785957ec62a2e1e7d9db663b62d999130f705f7ac11b567891dc"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.662731 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-84t6n" event={"ID":"d433f87b-087a-47e4-93e2-0754097e5b1b","Type":"ContainerStarted","Data":"1f1b9dca9b5a9a541af4d76129aa30d8299e983947eaa5441509fd784fd1c872"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.679488 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerStarted","Data":"eeca353be9446424ca300a3698d7eb6471b7c5a63200ea1ee0ee9e921e3e8e01"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.679725 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerStarted","Data":"bbff974d38cca45c6c88a04dad7cdda69f53baf63c754f10cda20d007e200d94"} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.682140 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.686380 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:12 crc kubenswrapper[4852]: E0129 10:44:12.688392 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 10:44:13.188369624 +0000 UTC m=+150.405700758 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.720040 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c75mb"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.723131 4852 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-29T10:44:12.539230392Z","Handler":null,"Name":""} Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.725989 4852 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.726024 4852 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 29 10:44:12 crc kubenswrapper[4852]: W0129 10:44:12.755467 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb545ab65_c670_46ea_9f48_518113c3387e.slice/crio-d9b84b4796f4b246b89b767d24e9f97592d3fe0005bfd3dde6f98cb9767996d7 WatchSource:0}: Error finding container d9b84b4796f4b246b89b767d24e9f97592d3fe0005bfd3dde6f98cb9767996d7: Status 404 returned error can't find the container with id d9b84b4796f4b246b89b767d24e9f97592d3fe0005bfd3dde6f98cb9767996d7 Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.775680 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.789871 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.797135 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.797175 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.829881 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tf7qq\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.891178 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.908188 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.913671 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qmx6z"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.921568 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.924343 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.924598 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qmx6z"] Jan 29 10:44:12 crc kubenswrapper[4852]: I0129 10:44:12.967700 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.093644 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-catalog-content\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.093715 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gvrr\" (UniqueName: \"kubernetes.io/projected/d1329aa3-b350-44b9-959e-ce3730a07103-kube-api-access-9gvrr\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.093767 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-utilities\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.150278 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tf7qq"] Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.195318 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gvrr\" (UniqueName: \"kubernetes.io/projected/d1329aa3-b350-44b9-959e-ce3730a07103-kube-api-access-9gvrr\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.195383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-utilities\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.195439 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-catalog-content\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.195848 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-catalog-content\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.196081 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-utilities\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.216913 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9gvrr\" (UniqueName: \"kubernetes.io/projected/d1329aa3-b350-44b9-959e-ce3730a07103-kube-api-access-9gvrr\") pod \"redhat-marketplace-qmx6z\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.258636 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.269358 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:13 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:13 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:13 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.269410 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.312306 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wpbx8"] Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.313546 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.319453 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wpbx8"] Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.398775 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h8ng\" (UniqueName: \"kubernetes.io/projected/eb3421a0-7b6e-40b6-9f51-df284594f711-kube-api-access-5h8ng\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.398853 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-utilities\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.398888 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-catalog-content\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.481195 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.486070 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qmx6z"] Jan 29 10:44:13 crc 
kubenswrapper[4852]: W0129 10:44:13.491031 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1329aa3_b350_44b9_959e_ce3730a07103.slice/crio-133f8926f1c679d57de2edab5fb05d0ea5a299abc6871201ebd97601063ae11b WatchSource:0}: Error finding container 133f8926f1c679d57de2edab5fb05d0ea5a299abc6871201ebd97601063ae11b: Status 404 returned error can't find the container with id 133f8926f1c679d57de2edab5fb05d0ea5a299abc6871201ebd97601063ae11b Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.499785 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h8ng\" (UniqueName: \"kubernetes.io/projected/eb3421a0-7b6e-40b6-9f51-df284594f711-kube-api-access-5h8ng\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.499857 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-utilities\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.499889 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-catalog-content\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.500359 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-catalog-content\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.500797 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-utilities\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.522625 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h8ng\" (UniqueName: \"kubernetes.io/projected/eb3421a0-7b6e-40b6-9f51-df284594f711-kube-api-access-5h8ng\") pod \"redhat-marketplace-wpbx8\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.647926 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.698811 4852 generic.go:334] "Generic (PLEG): container finished" podID="b545ab65-c670-46ea-9f48-518113c3387e" containerID="3647c0e5f5583d66b7bb5e860b70b8d58f9e4be42de2882a3c117e6b720ecf58" exitCode=0 Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.699342 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c75mb" event={"ID":"b545ab65-c670-46ea-9f48-518113c3387e","Type":"ContainerDied","Data":"3647c0e5f5583d66b7bb5e860b70b8d58f9e4be42de2882a3c117e6b720ecf58"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.699398 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c75mb" event={"ID":"b545ab65-c670-46ea-9f48-518113c3387e","Type":"ContainerStarted","Data":"d9b84b4796f4b246b89b767d24e9f97592d3fe0005bfd3dde6f98cb9767996d7"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.702028 4852 generic.go:334] "Generic (PLEG): container finished" podID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerID="eeca353be9446424ca300a3698d7eb6471b7c5a63200ea1ee0ee9e921e3e8e01" exitCode=0 Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.702128 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerDied","Data":"eeca353be9446424ca300a3698d7eb6471b7c5a63200ea1ee0ee9e921e3e8e01"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.705125 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" event={"ID":"ee27437f-db20-4337-813d-aaa57c3a95d5","Type":"ContainerStarted","Data":"f34307266f43e050fadb9eadd23785daf8113f78d6ae7c54ee9b4e748e8f3ce4"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.705172 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" event={"ID":"ee27437f-db20-4337-813d-aaa57c3a95d5","Type":"ContainerStarted","Data":"fa69c46a25e23ab97b5fac4a0c7606dd0d6d67d1c900465ea590cb5321591d5e"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.705246 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.728291 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" event={"ID":"db443f33-63b4-4ba5-b7a2-1578e241f449","Type":"ContainerStarted","Data":"8422015e6029dd50b9d78857d9e7ef2a3f412e2d99b211633819a502a857942a"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.730574 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerID="a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972" exitCode=0 Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.730635 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerDied","Data":"a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.732821 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"ae2c5ee8f09ec07fc10a9895c563cf12acb74c2a4b54c344b833dbf8124d6f2d"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.735113 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"0b1613a7e0b85cba4df72dd0b9806eb93d0dae3d8c56628b9f2066e009c387ac"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.735293 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.737914 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"34189ac0-6ab7-42c6-a3e8-afe1fbdee347","Type":"ContainerStarted","Data":"a79b74473b1addaf7a04db9f899935689f6a7da1b706beafb48e361dab81213d"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.737952 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"34189ac0-6ab7-42c6-a3e8-afe1fbdee347","Type":"ContainerStarted","Data":"88fff0250226dccd42fbd8f431f1ef3c6e50cf7a3afa98f08d4e973a251f2110"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.739456 4852 generic.go:334] "Generic (PLEG): container finished" podID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerID="4d8ccaa58f656f7d4d2b7c78b7707e0938b9e2750af5eec48143c9e68d262af8" exitCode=0 Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.739506 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-84t6n" event={"ID":"d433f87b-087a-47e4-93e2-0754097e5b1b","Type":"ContainerDied","Data":"4d8ccaa58f656f7d4d2b7c78b7707e0938b9e2750af5eec48143c9e68d262af8"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.748001 4852 generic.go:334] "Generic (PLEG): container finished" podID="d1329aa3-b350-44b9-959e-ce3730a07103" containerID="a6094befa4d7db6a987015d0e69a94a7a6cd1aa62cedbfb9f2a4a97791ec265f" exitCode=0 Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.748081 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qmx6z" event={"ID":"d1329aa3-b350-44b9-959e-ce3730a07103","Type":"ContainerDied","Data":"a6094befa4d7db6a987015d0e69a94a7a6cd1aa62cedbfb9f2a4a97791ec265f"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.748106 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qmx6z" event={"ID":"d1329aa3-b350-44b9-959e-ce3730a07103","Type":"ContainerStarted","Data":"133f8926f1c679d57de2edab5fb05d0ea5a299abc6871201ebd97601063ae11b"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.749256 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" podStartSLOduration=130.749236316 podStartE2EDuration="2m10.749236316s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:13.745402501 +0000 UTC m=+150.962733675" watchObservedRunningTime="2026-01-29 10:44:13.749236316 +0000 UTC m=+150.966567450" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.755920 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"6e7c060021004d2ef8092f4b98582ce1981521dc7f618ce3fcbf986ee8aae7ce"} Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.779025 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.787341 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-cbgpk" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.812534 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=1.812508138 podStartE2EDuration="1.812508138s" podCreationTimestamp="2026-01-29 10:44:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:13.783999134 +0000 UTC m=+151.001330268" watchObservedRunningTime="2026-01-29 10:44:13.812508138 +0000 UTC m=+151.029839272" Jan 29 10:44:13 crc kubenswrapper[4852]: I0129 10:44:13.847624 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-m8qlz" podStartSLOduration=11.847604995 podStartE2EDuration="11.847604995s" podCreationTimestamp="2026-01-29 10:44:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:13.845495643 +0000 UTC m=+151.062826797" watchObservedRunningTime="2026-01-29 10:44:13.847604995 +0000 UTC m=+151.064936129" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.112629 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-n2zwn"] Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.120674 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.123907 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.131087 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n2zwn"] Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.134072 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wpbx8"] Jan 29 10:44:14 crc kubenswrapper[4852]: W0129 10:44:14.163374 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb3421a0_7b6e_40b6_9f51_df284594f711.slice/crio-ca088a1bb61ef2716eb58d8e0c3c43c17137cc35b5ba1273370e2d728885462d WatchSource:0}: Error finding container ca088a1bb61ef2716eb58d8e0c3c43c17137cc35b5ba1273370e2d728885462d: Status 404 returned error can't find the container with id ca088a1bb61ef2716eb58d8e0c3c43c17137cc35b5ba1273370e2d728885462d Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.226438 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-utilities\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.226540 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z64v\" (UniqueName: \"kubernetes.io/projected/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-kube-api-access-2z64v\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.226592 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-catalog-content\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.276879 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:14 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:14 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:14 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.277200 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.329654 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z64v\" (UniqueName: \"kubernetes.io/projected/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-kube-api-access-2z64v\") pod \"redhat-operators-n2zwn\" (UID: 
\"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.329756 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-catalog-content\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.329851 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-utilities\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.330429 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-catalog-content\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.330462 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-utilities\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.355524 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z64v\" (UniqueName: \"kubernetes.io/projected/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-kube-api-access-2z64v\") pod \"redhat-operators-n2zwn\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.445702 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.513945 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p7gmn"] Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.516168 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.524251 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7gmn"] Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.636421 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-utilities\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.636774 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-catalog-content\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.636804 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcnzx\" (UniqueName: \"kubernetes.io/projected/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-kube-api-access-kcnzx\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.716023 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n2zwn"] Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.737717 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-utilities\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.737748 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-catalog-content\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.737768 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcnzx\" (UniqueName: \"kubernetes.io/projected/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-kube-api-access-kcnzx\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.738283 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-utilities\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.738316 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-catalog-content\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " 
pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.759210 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcnzx\" (UniqueName: \"kubernetes.io/projected/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-kube-api-access-kcnzx\") pod \"redhat-operators-p7gmn\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.784647 4852 generic.go:334] "Generic (PLEG): container finished" podID="34189ac0-6ab7-42c6-a3e8-afe1fbdee347" containerID="a79b74473b1addaf7a04db9f899935689f6a7da1b706beafb48e361dab81213d" exitCode=0 Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.784721 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"34189ac0-6ab7-42c6-a3e8-afe1fbdee347","Type":"ContainerDied","Data":"a79b74473b1addaf7a04db9f899935689f6a7da1b706beafb48e361dab81213d"} Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.785815 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerStarted","Data":"c6fb2202cc654f910d94c6894918d0f193783e7aafd81e9f75c11ad72c0e3f94"} Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.790967 4852 generic.go:334] "Generic (PLEG): container finished" podID="08e29585-8b41-4ac1-94f2-38a45107f4b9" containerID="89b9c3b22ac43d6002d1a19dc8d2a4c2be4866f79255e4463dc428c0c9c0ecd1" exitCode=0 Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.791025 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" event={"ID":"08e29585-8b41-4ac1-94f2-38a45107f4b9","Type":"ContainerDied","Data":"89b9c3b22ac43d6002d1a19dc8d2a4c2be4866f79255e4463dc428c0c9c0ecd1"} Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.793895 4852 generic.go:334] "Generic (PLEG): container finished" podID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerID="00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b" exitCode=0 Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.794019 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerDied","Data":"00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b"} Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.794056 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerStarted","Data":"ca088a1bb61ef2716eb58d8e0c3c43c17137cc35b5ba1273370e2d728885462d"} Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.860237 4852 patch_prober.go:28] interesting pod/downloads-7954f5f757-sqjq7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.860298 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sqjq7" podUID="1cb28b3e-a772-4541-a845-34fd991c6162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 
10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.861151 4852 patch_prober.go:28] interesting pod/downloads-7954f5f757-sqjq7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.861179 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-sqjq7" podUID="1cb28b3e-a772-4541-a845-34fd991c6162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 10:44:14 crc kubenswrapper[4852]: I0129 10:44:14.889486 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.163349 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7gmn"] Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.222722 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.223314 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.224665 4852 patch_prober.go:28] interesting pod/console-f9d7485db-dcnm7 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.224710 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-dcnm7" podUID="bc753007-98cd-4b3b-ab70-6035482f7c5e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.268166 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.272121 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 10:44:15 crc kubenswrapper[4852]: [-]has-synced failed: reason withheld Jan 29 10:44:15 crc kubenswrapper[4852]: [+]process-running ok Jan 29 10:44:15 crc kubenswrapper[4852]: healthz check failed Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.272161 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.373946 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.374001 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:15 crc kubenswrapper[4852]: 
I0129 10:44:15.381334 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.667524 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.836667 4852 generic.go:334] "Generic (PLEG): container finished" podID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerID="dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa" exitCode=0 Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.836743 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerDied","Data":"dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa"} Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.836773 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerStarted","Data":"28ab38bf66df497c688df67390c098da56d80abf0629626b193443cde6116dea"} Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.847567 4852 generic.go:334] "Generic (PLEG): container finished" podID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerID="1c8f9bd05a3c416282e731a485924ad30dbc5fae7e76e8ae7f9f086fbc159852" exitCode=0 Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.848835 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerDied","Data":"1c8f9bd05a3c416282e731a485924ad30dbc5fae7e76e8ae7f9f086fbc159852"} Jan 29 10:44:15 crc kubenswrapper[4852]: I0129 10:44:15.857329 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-25c4d" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.277395 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.288964 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-tg9p8" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.332178 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.433051 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.482238 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kubelet-dir\") pod \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.482314 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kube-api-access\") pod \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\" (UID: \"34189ac0-6ab7-42c6-a3e8-afe1fbdee347\") " Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.483320 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "34189ac0-6ab7-42c6-a3e8-afe1fbdee347" (UID: "34189ac0-6ab7-42c6-a3e8-afe1fbdee347"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.492274 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "34189ac0-6ab7-42c6-a3e8-afe1fbdee347" (UID: "34189ac0-6ab7-42c6-a3e8-afe1fbdee347"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.583130 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e29585-8b41-4ac1-94f2-38a45107f4b9-config-volume\") pod \"08e29585-8b41-4ac1-94f2-38a45107f4b9\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.583220 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e29585-8b41-4ac1-94f2-38a45107f4b9-secret-volume\") pod \"08e29585-8b41-4ac1-94f2-38a45107f4b9\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.583276 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqrnk\" (UniqueName: \"kubernetes.io/projected/08e29585-8b41-4ac1-94f2-38a45107f4b9-kube-api-access-lqrnk\") pod \"08e29585-8b41-4ac1-94f2-38a45107f4b9\" (UID: \"08e29585-8b41-4ac1-94f2-38a45107f4b9\") " Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.583569 4852 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.583605 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/34189ac0-6ab7-42c6-a3e8-afe1fbdee347-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.584907 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e29585-8b41-4ac1-94f2-38a45107f4b9-config-volume" (OuterVolumeSpecName: "config-volume") pod 
"08e29585-8b41-4ac1-94f2-38a45107f4b9" (UID: "08e29585-8b41-4ac1-94f2-38a45107f4b9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.593309 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08e29585-8b41-4ac1-94f2-38a45107f4b9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "08e29585-8b41-4ac1-94f2-38a45107f4b9" (UID: "08e29585-8b41-4ac1-94f2-38a45107f4b9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.593466 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08e29585-8b41-4ac1-94f2-38a45107f4b9-kube-api-access-lqrnk" (OuterVolumeSpecName: "kube-api-access-lqrnk") pod "08e29585-8b41-4ac1-94f2-38a45107f4b9" (UID: "08e29585-8b41-4ac1-94f2-38a45107f4b9"). InnerVolumeSpecName "kube-api-access-lqrnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.685437 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e29585-8b41-4ac1-94f2-38a45107f4b9-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.685473 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqrnk\" (UniqueName: \"kubernetes.io/projected/08e29585-8b41-4ac1-94f2-38a45107f4b9-kube-api-access-lqrnk\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.685483 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e29585-8b41-4ac1-94f2-38a45107f4b9-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.859944 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.860016 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft" event={"ID":"08e29585-8b41-4ac1-94f2-38a45107f4b9","Type":"ContainerDied","Data":"7dd0acd5da36bdd139bf85bdcd3bffeea92a588caeb72a96a10695a2a8f925bb"} Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.860052 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dd0acd5da36bdd139bf85bdcd3bffeea92a588caeb72a96a10695a2a8f925bb" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.861777 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"34189ac0-6ab7-42c6-a3e8-afe1fbdee347","Type":"ContainerDied","Data":"88fff0250226dccd42fbd8f431f1ef3c6e50cf7a3afa98f08d4e973a251f2110"} Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.861809 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88fff0250226dccd42fbd8f431f1ef3c6e50cf7a3afa98f08d4e973a251f2110" Jan 29 10:44:16 crc kubenswrapper[4852]: I0129 10:44:16.861918 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.472671 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 29 10:44:17 crc kubenswrapper[4852]: E0129 10:44:17.472938 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e29585-8b41-4ac1-94f2-38a45107f4b9" containerName="collect-profiles" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.472953 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e29585-8b41-4ac1-94f2-38a45107f4b9" containerName="collect-profiles" Jan 29 10:44:17 crc kubenswrapper[4852]: E0129 10:44:17.472973 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34189ac0-6ab7-42c6-a3e8-afe1fbdee347" containerName="pruner" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.472981 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="34189ac0-6ab7-42c6-a3e8-afe1fbdee347" containerName="pruner" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.473124 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e29585-8b41-4ac1-94f2-38a45107f4b9" containerName="collect-profiles" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.473139 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="34189ac0-6ab7-42c6-a3e8-afe1fbdee347" containerName="pruner" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.473630 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.476055 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.476222 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.485199 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.599638 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/039d5def-83b9-44d6-a9f4-758eae8b47ed-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.599687 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/039d5def-83b9-44d6-a9f4-758eae8b47ed-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.701179 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/039d5def-83b9-44d6-a9f4-758eae8b47ed-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.701284 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/039d5def-83b9-44d6-a9f4-758eae8b47ed-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.701335 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/039d5def-83b9-44d6-a9f4-758eae8b47ed-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.718179 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/039d5def-83b9-44d6-a9f4-758eae8b47ed-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:17 crc kubenswrapper[4852]: I0129 10:44:17.827471 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:18 crc kubenswrapper[4852]: I0129 10:44:18.199356 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 29 10:44:18 crc kubenswrapper[4852]: W0129 10:44:18.229662 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod039d5def_83b9_44d6_a9f4_758eae8b47ed.slice/crio-0e22e8db3bd3b66cd495837e9ab7cce58999848eecbdb5e561eb3baf0dceacec WatchSource:0}: Error finding container 0e22e8db3bd3b66cd495837e9ab7cce58999848eecbdb5e561eb3baf0dceacec: Status 404 returned error can't find the container with id 0e22e8db3bd3b66cd495837e9ab7cce58999848eecbdb5e561eb3baf0dceacec Jan 29 10:44:18 crc kubenswrapper[4852]: I0129 10:44:18.882690 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"039d5def-83b9-44d6-a9f4-758eae8b47ed","Type":"ContainerStarted","Data":"fb04900146e345980cd8a961a130e713e39ce9b756e073f62b505bdfafd22c9c"} Jan 29 10:44:18 crc kubenswrapper[4852]: I0129 10:44:18.883010 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"039d5def-83b9-44d6-a9f4-758eae8b47ed","Type":"ContainerStarted","Data":"0e22e8db3bd3b66cd495837e9ab7cce58999848eecbdb5e561eb3baf0dceacec"} Jan 29 10:44:19 crc kubenswrapper[4852]: I0129 10:44:19.903896 4852 generic.go:334] "Generic (PLEG): container finished" podID="039d5def-83b9-44d6-a9f4-758eae8b47ed" containerID="fb04900146e345980cd8a961a130e713e39ce9b756e073f62b505bdfafd22c9c" exitCode=0 Jan 29 10:44:19 crc kubenswrapper[4852]: I0129 10:44:19.903954 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"039d5def-83b9-44d6-a9f4-758eae8b47ed","Type":"ContainerDied","Data":"fb04900146e345980cd8a961a130e713e39ce9b756e073f62b505bdfafd22c9c"} Jan 29 10:44:20 crc kubenswrapper[4852]: I0129 10:44:20.715140 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-956dv" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.248023 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.392140 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/039d5def-83b9-44d6-a9f4-758eae8b47ed-kube-api-access\") pod \"039d5def-83b9-44d6-a9f4-758eae8b47ed\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.392411 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/039d5def-83b9-44d6-a9f4-758eae8b47ed-kubelet-dir\") pod \"039d5def-83b9-44d6-a9f4-758eae8b47ed\" (UID: \"039d5def-83b9-44d6-a9f4-758eae8b47ed\") " Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.392554 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/039d5def-83b9-44d6-a9f4-758eae8b47ed-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "039d5def-83b9-44d6-a9f4-758eae8b47ed" (UID: "039d5def-83b9-44d6-a9f4-758eae8b47ed"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.398357 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/039d5def-83b9-44d6-a9f4-758eae8b47ed-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "039d5def-83b9-44d6-a9f4-758eae8b47ed" (UID: "039d5def-83b9-44d6-a9f4-758eae8b47ed"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.494939 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/039d5def-83b9-44d6-a9f4-758eae8b47ed-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.494973 4852 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/039d5def-83b9-44d6-a9f4-758eae8b47ed-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.930763 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"039d5def-83b9-44d6-a9f4-758eae8b47ed","Type":"ContainerDied","Data":"0e22e8db3bd3b66cd495837e9ab7cce58999848eecbdb5e561eb3baf0dceacec"} Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.930798 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e22e8db3bd3b66cd495837e9ab7cce58999848eecbdb5e561eb3baf0dceacec" Jan 29 10:44:21 crc kubenswrapper[4852]: I0129 10:44:21.930846 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 10:44:24 crc kubenswrapper[4852]: I0129 10:44:24.859245 4852 patch_prober.go:28] interesting pod/downloads-7954f5f757-sqjq7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 10:44:24 crc kubenswrapper[4852]: I0129 10:44:24.859872 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sqjq7" podUID="1cb28b3e-a772-4541-a845-34fd991c6162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 10:44:24 crc kubenswrapper[4852]: I0129 10:44:24.859359 4852 patch_prober.go:28] interesting pod/downloads-7954f5f757-sqjq7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 10:44:24 crc kubenswrapper[4852]: I0129 10:44:24.859936 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-sqjq7" podUID="1cb28b3e-a772-4541-a845-34fd991c6162" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 10:44:25 crc kubenswrapper[4852]: I0129 10:44:25.227009 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:25 crc kubenswrapper[4852]: I0129 10:44:25.231803 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:44:26 crc kubenswrapper[4852]: I0129 10:44:26.169675 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:44:26 crc kubenswrapper[4852]: I0129 10:44:26.177078 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2d44fabb-f3c2-4492-9ab4-567a81928ccc-metrics-certs\") pod \"network-metrics-daemon-bqdnv\" (UID: \"2d44fabb-f3c2-4492-9ab4-567a81928ccc\") " pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:44:26 crc kubenswrapper[4852]: I0129 10:44:26.212375 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bqdnv" Jan 29 10:44:30 crc kubenswrapper[4852]: I0129 10:44:30.018834 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:44:30 crc kubenswrapper[4852]: I0129 10:44:30.018928 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:44:32 crc kubenswrapper[4852]: I0129 10:44:32.974938 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:44:34 crc kubenswrapper[4852]: I0129 10:44:34.871059 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-sqjq7" Jan 29 10:44:45 crc kubenswrapper[4852]: I0129 10:44:45.349806 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vjgg4" Jan 29 10:44:50 crc kubenswrapper[4852]: E0129 10:44:50.651547 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 10:44:50 crc kubenswrapper[4852]: E0129 10:44:50.652068 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qpcpp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-84t6n_openshift-marketplace(d433f87b-087a-47e4-93e2-0754097e5b1b): ErrImagePull: rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:50 crc kubenswrapper[4852]: E0129 10:44:50.653203 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-84t6n" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" Jan 29 10:44:51 crc kubenswrapper[4852]: E0129 10:44:51.637313 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 10:44:51 crc kubenswrapper[4852]: E0129 10:44:51.637462 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zqc9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-znvks_openshift-marketplace(28b1ffef-b64a-4b00-a08f-60535f0fef60): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:51 crc kubenswrapper[4852]: E0129 10:44:51.639286 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-znvks" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.029990 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 10:44:52 crc kubenswrapper[4852]: E0129 10:44:52.114688 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: 
context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 10:44:52 crc kubenswrapper[4852]: E0129 10:44:52.114876 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8r4vs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-c75mb_openshift-marketplace(b545ab65-c670-46ea-9f48-518113c3387e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:52 crc kubenswrapper[4852]: E0129 10:44:52.116103 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-c75mb" podUID="b545ab65-c670-46ea-9f48-518113c3387e" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.871637 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 10:44:52 crc kubenswrapper[4852]: E0129 10:44:52.871904 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="039d5def-83b9-44d6-a9f4-758eae8b47ed" containerName="pruner" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.871920 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="039d5def-83b9-44d6-a9f4-758eae8b47ed" containerName="pruner" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.872082 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="039d5def-83b9-44d6-a9f4-758eae8b47ed" containerName="pruner" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.872546 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.882890 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.883017 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 29 10:44:52 crc kubenswrapper[4852]: I0129 10:44:52.892079 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.037056 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a48672b8-4f28-46bd-9840-f37fddf2d95f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.037356 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a48672b8-4f28-46bd-9840-f37fddf2d95f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.139122 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a48672b8-4f28-46bd-9840-f37fddf2d95f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.139241 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a48672b8-4f28-46bd-9840-f37fddf2d95f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.139330 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a48672b8-4f28-46bd-9840-f37fddf2d95f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.158533 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a48672b8-4f28-46bd-9840-f37fddf2d95f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: I0129 10:44:53.202428 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.753893 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-znvks" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.753896 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-84t6n" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.753913 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-c75mb" podUID="b545ab65-c670-46ea-9f48-518113c3387e" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.858208 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.858228 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.858351 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5h8ng,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
redhat-marketplace-wpbx8_openshift-marketplace(eb3421a0-7b6e-40b6-9f51-df284594f711): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.858375 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9gvrr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-qmx6z_openshift-marketplace(d1329aa3-b350-44b9-959e-ce3730a07103): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.859512 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-wpbx8" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" Jan 29 10:44:53 crc kubenswrapper[4852]: E0129 10:44:53.859556 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-qmx6z" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.071552 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.073412 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.089038 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.197756 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kube-api-access\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.197803 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-var-lock\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.197821 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kubelet-dir\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.298855 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kube-api-access\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.298907 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-var-lock\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.298925 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kubelet-dir\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.299016 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kubelet-dir\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.299307 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-var-lock\") pod \"installer-9-crc\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.318548 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.398945 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:44:57 crc kubenswrapper[4852]: E0129 10:44:57.588559 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-qmx6z" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" Jan 29 10:44:57 crc kubenswrapper[4852]: E0129 10:44:57.588691 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-wpbx8" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" Jan 29 10:44:57 crc kubenswrapper[4852]: I0129 10:44:57.834306 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 10:44:57 crc kubenswrapper[4852]: E0129 10:44:57.857375 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 10:44:57 crc kubenswrapper[4852]: E0129 10:44:57.857521 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kcnzx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-p7gmn_openshift-marketplace(92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:57 crc kubenswrapper[4852]: E0129 10:44:57.858752 4852 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-p7gmn" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" Jan 29 10:44:58 crc kubenswrapper[4852]: I0129 10:44:58.096339 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bqdnv"] Jan 29 10:44:58 crc kubenswrapper[4852]: I0129 10:44:58.099097 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 10:44:58 crc kubenswrapper[4852]: I0129 10:44:58.218446 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44","Type":"ContainerStarted","Data":"579053b2158b9c7655ec974e140106ebb39a254597e5be3fedc19dc6a2f8680d"} Jan 29 10:44:58 crc kubenswrapper[4852]: E0129 10:44:58.370836 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 10:44:58 crc kubenswrapper[4852]: E0129 10:44:58.371000 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4m2dn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-fbk7g_openshift-marketplace(4ce2c002-428f-4b74-b25a-b9e4c9ed11d9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 10:44:58 crc kubenswrapper[4852]: E0129 10:44:58.372315 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-fbk7g" 
podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" Jan 29 10:44:58 crc kubenswrapper[4852]: E0129 10:44:58.558929 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-p7gmn" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" Jan 29 10:44:58 crc kubenswrapper[4852]: W0129 10:44:58.560321 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda48672b8_4f28_46bd_9840_f37fddf2d95f.slice/crio-c0e7065ba83af55e9dd6cb1f4e2c64e78386920ba48ea2a402f13da8949d6eb9 WatchSource:0}: Error finding container c0e7065ba83af55e9dd6cb1f4e2c64e78386920ba48ea2a402f13da8949d6eb9: Status 404 returned error can't find the container with id c0e7065ba83af55e9dd6cb1f4e2c64e78386920ba48ea2a402f13da8949d6eb9 Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.228502 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" event={"ID":"2d44fabb-f3c2-4492-9ab4-567a81928ccc","Type":"ContainerStarted","Data":"3e3662a5dd32d22667cae7ea5dde3e47d8a8351fa891396e1c9008833cb40f62"} Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.228822 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" event={"ID":"2d44fabb-f3c2-4492-9ab4-567a81928ccc","Type":"ContainerStarted","Data":"2905af1426dce9788ee951fbf23145101b99e1ac0a0318bbea0f4164f665e4c8"} Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.231433 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44","Type":"ContainerStarted","Data":"b5643a24c8bf0a8ae59caca4e7af8ba2763111ce1d0f361b994950335da9c8c6"} Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.233943 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a48672b8-4f28-46bd-9840-f37fddf2d95f","Type":"ContainerStarted","Data":"a818544a97c042ab04309b188e68e23625a06be2e87996727f3fa7e2644ee4f4"} Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.233991 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a48672b8-4f28-46bd-9840-f37fddf2d95f","Type":"ContainerStarted","Data":"c0e7065ba83af55e9dd6cb1f4e2c64e78386920ba48ea2a402f13da8949d6eb9"} Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.237718 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerStarted","Data":"a9a00da4276d87b03e729bc6aece7515facd8588f3e943d4943cf016f740dc0d"} Jan 29 10:44:59 crc kubenswrapper[4852]: E0129 10:44:59.239743 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-fbk7g" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.252567 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.252545084 podStartE2EDuration="2.252545084s" podCreationTimestamp="2026-01-29 10:44:57 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:59.250253307 +0000 UTC m=+196.467584451" watchObservedRunningTime="2026-01-29 10:44:59.252545084 +0000 UTC m=+196.469876218" Jan 29 10:44:59 crc kubenswrapper[4852]: I0129 10:44:59.309714 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=7.309695746 podStartE2EDuration="7.309695746s" podCreationTimestamp="2026-01-29 10:44:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:44:59.292753037 +0000 UTC m=+196.510084171" watchObservedRunningTime="2026-01-29 10:44:59.309695746 +0000 UTC m=+196.527026880" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.016792 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.017255 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.142521 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc"] Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.143413 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.145527 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.149563 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.154512 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc"] Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.240497 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6996b7b-e91f-4806-875c-b579f9aa9211-secret-volume\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.240612 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtjdv\" (UniqueName: \"kubernetes.io/projected/c6996b7b-e91f-4806-875c-b579f9aa9211-kube-api-access-qtjdv\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.240671 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6996b7b-e91f-4806-875c-b579f9aa9211-config-volume\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.244919 4852 generic.go:334] "Generic (PLEG): container finished" podID="a48672b8-4f28-46bd-9840-f37fddf2d95f" containerID="a818544a97c042ab04309b188e68e23625a06be2e87996727f3fa7e2644ee4f4" exitCode=0 Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.245002 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a48672b8-4f28-46bd-9840-f37fddf2d95f","Type":"ContainerDied","Data":"a818544a97c042ab04309b188e68e23625a06be2e87996727f3fa7e2644ee4f4"} Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.247055 4852 generic.go:334] "Generic (PLEG): container finished" podID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerID="a9a00da4276d87b03e729bc6aece7515facd8588f3e943d4943cf016f740dc0d" exitCode=0 Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.247158 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerDied","Data":"a9a00da4276d87b03e729bc6aece7515facd8588f3e943d4943cf016f740dc0d"} Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.252510 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bqdnv" event={"ID":"2d44fabb-f3c2-4492-9ab4-567a81928ccc","Type":"ContainerStarted","Data":"2ab5d1a0b24bea42fef1b4d4a1e9f0eb49eec84d2607df95fe0bf9b04f25c83d"} Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 
10:45:00.296991 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-bqdnv" podStartSLOduration=177.296948238 podStartE2EDuration="2m57.296948238s" podCreationTimestamp="2026-01-29 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:45:00.295030681 +0000 UTC m=+197.512361815" watchObservedRunningTime="2026-01-29 10:45:00.296948238 +0000 UTC m=+197.514279372" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.341870 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6996b7b-e91f-4806-875c-b579f9aa9211-secret-volume\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.342289 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtjdv\" (UniqueName: \"kubernetes.io/projected/c6996b7b-e91f-4806-875c-b579f9aa9211-kube-api-access-qtjdv\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.342358 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6996b7b-e91f-4806-875c-b579f9aa9211-config-volume\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.343410 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6996b7b-e91f-4806-875c-b579f9aa9211-config-volume\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.352455 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6996b7b-e91f-4806-875c-b579f9aa9211-secret-volume\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.365773 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtjdv\" (UniqueName: \"kubernetes.io/projected/c6996b7b-e91f-4806-875c-b579f9aa9211-kube-api-access-qtjdv\") pod \"collect-profiles-29494725-n4kzc\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.460045 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:00 crc kubenswrapper[4852]: I0129 10:45:00.644039 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc"] Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.266111 4852 generic.go:334] "Generic (PLEG): container finished" podID="c6996b7b-e91f-4806-875c-b579f9aa9211" containerID="ada10b056cbe728387762aa69aeaa372c9890784752d9f9038f3259b272234ea" exitCode=0 Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.268010 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" event={"ID":"c6996b7b-e91f-4806-875c-b579f9aa9211","Type":"ContainerDied","Data":"ada10b056cbe728387762aa69aeaa372c9890784752d9f9038f3259b272234ea"} Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.268066 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" event={"ID":"c6996b7b-e91f-4806-875c-b579f9aa9211","Type":"ContainerStarted","Data":"c0016e4e4df19ad5bf3c85af552427d6999c105417512eec23f4181821dfefa2"} Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.533943 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.663676 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a48672b8-4f28-46bd-9840-f37fddf2d95f-kubelet-dir\") pod \"a48672b8-4f28-46bd-9840-f37fddf2d95f\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.663867 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a48672b8-4f28-46bd-9840-f37fddf2d95f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a48672b8-4f28-46bd-9840-f37fddf2d95f" (UID: "a48672b8-4f28-46bd-9840-f37fddf2d95f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.664358 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a48672b8-4f28-46bd-9840-f37fddf2d95f-kube-api-access\") pod \"a48672b8-4f28-46bd-9840-f37fddf2d95f\" (UID: \"a48672b8-4f28-46bd-9840-f37fddf2d95f\") " Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.664801 4852 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a48672b8-4f28-46bd-9840-f37fddf2d95f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.677178 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a48672b8-4f28-46bd-9840-f37fddf2d95f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a48672b8-4f28-46bd-9840-f37fddf2d95f" (UID: "a48672b8-4f28-46bd-9840-f37fddf2d95f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:01 crc kubenswrapper[4852]: I0129 10:45:01.765657 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a48672b8-4f28-46bd-9840-f37fddf2d95f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.272688 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.272786 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a48672b8-4f28-46bd-9840-f37fddf2d95f","Type":"ContainerDied","Data":"c0e7065ba83af55e9dd6cb1f4e2c64e78386920ba48ea2a402f13da8949d6eb9"} Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.272816 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0e7065ba83af55e9dd6cb1f4e2c64e78386920ba48ea2a402f13da8949d6eb9" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.275662 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerStarted","Data":"bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047"} Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.294893 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-n2zwn" podStartSLOduration=2.728809493 podStartE2EDuration="48.294873191s" podCreationTimestamp="2026-01-29 10:44:14 +0000 UTC" firstStartedPulling="2026-01-29 10:44:15.850985874 +0000 UTC m=+153.068317008" lastFinishedPulling="2026-01-29 10:45:01.417049572 +0000 UTC m=+198.634380706" observedRunningTime="2026-01-29 10:45:02.289929226 +0000 UTC m=+199.507260380" watchObservedRunningTime="2026-01-29 10:45:02.294873191 +0000 UTC m=+199.512204325" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.508965 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.687959 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtjdv\" (UniqueName: \"kubernetes.io/projected/c6996b7b-e91f-4806-875c-b579f9aa9211-kube-api-access-qtjdv\") pod \"c6996b7b-e91f-4806-875c-b579f9aa9211\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.688006 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6996b7b-e91f-4806-875c-b579f9aa9211-secret-volume\") pod \"c6996b7b-e91f-4806-875c-b579f9aa9211\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.688033 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6996b7b-e91f-4806-875c-b579f9aa9211-config-volume\") pod \"c6996b7b-e91f-4806-875c-b579f9aa9211\" (UID: \"c6996b7b-e91f-4806-875c-b579f9aa9211\") " Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.688919 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6996b7b-e91f-4806-875c-b579f9aa9211-config-volume" (OuterVolumeSpecName: "config-volume") pod "c6996b7b-e91f-4806-875c-b579f9aa9211" (UID: "c6996b7b-e91f-4806-875c-b579f9aa9211"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.694703 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6996b7b-e91f-4806-875c-b579f9aa9211-kube-api-access-qtjdv" (OuterVolumeSpecName: "kube-api-access-qtjdv") pod "c6996b7b-e91f-4806-875c-b579f9aa9211" (UID: "c6996b7b-e91f-4806-875c-b579f9aa9211"). InnerVolumeSpecName "kube-api-access-qtjdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.694722 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6996b7b-e91f-4806-875c-b579f9aa9211-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c6996b7b-e91f-4806-875c-b579f9aa9211" (UID: "c6996b7b-e91f-4806-875c-b579f9aa9211"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.790941 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtjdv\" (UniqueName: \"kubernetes.io/projected/c6996b7b-e91f-4806-875c-b579f9aa9211-kube-api-access-qtjdv\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.791077 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6996b7b-e91f-4806-875c-b579f9aa9211-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:02 crc kubenswrapper[4852]: I0129 10:45:02.791093 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6996b7b-e91f-4806-875c-b579f9aa9211-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:03 crc kubenswrapper[4852]: I0129 10:45:03.281501 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" Jan 29 10:45:03 crc kubenswrapper[4852]: I0129 10:45:03.281624 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc" event={"ID":"c6996b7b-e91f-4806-875c-b579f9aa9211","Type":"ContainerDied","Data":"c0016e4e4df19ad5bf3c85af552427d6999c105417512eec23f4181821dfefa2"} Jan 29 10:45:03 crc kubenswrapper[4852]: I0129 10:45:03.282826 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0016e4e4df19ad5bf3c85af552427d6999c105417512eec23f4181821dfefa2" Jan 29 10:45:04 crc kubenswrapper[4852]: I0129 10:45:04.446623 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:45:04 crc kubenswrapper[4852]: I0129 10:45:04.446864 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:45:05 crc kubenswrapper[4852]: I0129 10:45:05.754272 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n2zwn" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="registry-server" probeResult="failure" output=< Jan 29 10:45:05 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 10:45:05 crc kubenswrapper[4852]: > Jan 29 10:45:13 crc kubenswrapper[4852]: I0129 10:45:13.334987 4852 generic.go:334] "Generic (PLEG): container finished" podID="b545ab65-c670-46ea-9f48-518113c3387e" containerID="b465866d7ec5630c69479b9f172bd31060bcf00b547b02b0eaa064282c88e503" exitCode=0 Jan 29 10:45:13 crc kubenswrapper[4852]: I0129 10:45:13.335104 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c75mb" event={"ID":"b545ab65-c670-46ea-9f48-518113c3387e","Type":"ContainerDied","Data":"b465866d7ec5630c69479b9f172bd31060bcf00b547b02b0eaa064282c88e503"} Jan 29 10:45:13 crc kubenswrapper[4852]: I0129 10:45:13.345872 4852 generic.go:334] "Generic (PLEG): container finished" podID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerID="8024d201ac960d59c21982f00efd17da80e51180c9dac89e5542f94788da5c88" exitCode=0 Jan 29 10:45:13 crc kubenswrapper[4852]: I0129 10:45:13.345972 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-84t6n" event={"ID":"d433f87b-087a-47e4-93e2-0754097e5b1b","Type":"ContainerDied","Data":"8024d201ac960d59c21982f00efd17da80e51180c9dac89e5542f94788da5c88"} Jan 29 10:45:13 crc kubenswrapper[4852]: I0129 10:45:13.348626 4852 generic.go:334] "Generic (PLEG): container finished" podID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerID="787da8a511f35cdd8e56036cebdf5d2449d905221b782e48fe866c4ba51f18de" exitCode=0 Jan 29 10:45:13 crc kubenswrapper[4852]: I0129 10:45:13.348678 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerDied","Data":"787da8a511f35cdd8e56036cebdf5d2449d905221b782e48fe866c4ba51f18de"} Jan 29 10:45:14 crc kubenswrapper[4852]: I0129 10:45:14.521779 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:45:14 crc kubenswrapper[4852]: I0129 10:45:14.559813 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:45:16 crc kubenswrapper[4852]: I0129 10:45:16.364226 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-84t6n" event={"ID":"d433f87b-087a-47e4-93e2-0754097e5b1b","Type":"ContainerStarted","Data":"4da40245ff800aec0d7d2217df7a72cd7a6c44f21930b98dfa35555f88ca1027"} Jan 29 10:45:16 crc kubenswrapper[4852]: I0129 10:45:16.365732 4852 generic.go:334] "Generic (PLEG): container finished" podID="d1329aa3-b350-44b9-959e-ce3730a07103" containerID="63065226604e71ec27ed2ba96dfdd95d9755e0b6eacf00db0bb69aea99f75a5f" exitCode=0 Jan 29 10:45:16 crc kubenswrapper[4852]: I0129 10:45:16.365799 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qmx6z" event={"ID":"d1329aa3-b350-44b9-959e-ce3730a07103","Type":"ContainerDied","Data":"63065226604e71ec27ed2ba96dfdd95d9755e0b6eacf00db0bb69aea99f75a5f"} Jan 29 10:45:16 crc kubenswrapper[4852]: I0129 10:45:16.367941 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerStarted","Data":"5e29417c37b504fbeb44d187e5b1bb9bdbba87bc1562e9fb534cd03174a3bee7"} Jan 29 10:45:16 crc kubenswrapper[4852]: I0129 10:45:16.369393 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerStarted","Data":"e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009"} Jan 29 10:45:16 crc kubenswrapper[4852]: I0129 10:45:16.384348 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-84t6n" podStartSLOduration=4.477042121 podStartE2EDuration="1m5.384333317s" podCreationTimestamp="2026-01-29 10:44:11 +0000 UTC" firstStartedPulling="2026-01-29 10:44:13.740824168 +0000 UTC m=+150.958155302" lastFinishedPulling="2026-01-29 10:45:14.648115364 +0000 UTC m=+211.865446498" observedRunningTime="2026-01-29 10:45:16.383346532 +0000 UTC m=+213.600677666" watchObservedRunningTime="2026-01-29 10:45:16.384333317 +0000 UTC m=+213.601664451" Jan 29 10:45:17 crc kubenswrapper[4852]: I0129 10:45:17.375044 4852 generic.go:334] "Generic (PLEG): container finished" podID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerID="e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009" exitCode=0 Jan 29 10:45:17 crc kubenswrapper[4852]: I0129 10:45:17.375122 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerDied","Data":"e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009"} Jan 29 10:45:17 crc kubenswrapper[4852]: I0129 10:45:17.393416 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-znvks" podStartSLOduration=5.428045877 podStartE2EDuration="1m7.393401394s" podCreationTimestamp="2026-01-29 10:44:10 +0000 UTC" firstStartedPulling="2026-01-29 10:44:12.681853914 +0000 UTC m=+149.899185048" lastFinishedPulling="2026-01-29 10:45:14.647209431 +0000 UTC m=+211.864540565" observedRunningTime="2026-01-29 10:45:17.390769108 +0000 UTC m=+214.608100262" watchObservedRunningTime="2026-01-29 10:45:17.393401394 +0000 UTC m=+214.610732538" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.252023 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.252677 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.294520 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.433617 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.683955 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.684010 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:45:21 crc kubenswrapper[4852]: I0129 10:45:21.734323 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:45:22 crc kubenswrapper[4852]: I0129 10:45:22.402749 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerStarted","Data":"1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82"} Jan 29 10:45:22 crc kubenswrapper[4852]: I0129 10:45:22.404518 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerStarted","Data":"bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029"} Jan 29 10:45:22 crc kubenswrapper[4852]: I0129 10:45:22.443154 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:45:22 crc kubenswrapper[4852]: I0129 10:45:22.696851 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-84t6n"] Jan 29 10:45:23 crc kubenswrapper[4852]: I0129 10:45:23.413247 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerID="1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82" exitCode=0 Jan 29 10:45:23 crc kubenswrapper[4852]: I0129 10:45:23.413310 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerDied","Data":"1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82"} Jan 29 10:45:23 crc kubenswrapper[4852]: I0129 10:45:23.417665 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c75mb" event={"ID":"b545ab65-c670-46ea-9f48-518113c3387e","Type":"ContainerStarted","Data":"822be194bd0a4257d4b810c074730b5ea53675a8d1fd1619efd1daa98c4320b5"} Jan 29 10:45:23 crc kubenswrapper[4852]: I0129 10:45:23.420519 4852 generic.go:334] "Generic (PLEG): container finished" podID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerID="bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029" exitCode=0 Jan 29 10:45:23 crc kubenswrapper[4852]: I0129 10:45:23.420626 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerDied","Data":"bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029"} Jan 29 10:45:23 crc kubenswrapper[4852]: I0129 10:45:23.482341 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c75mb" podStartSLOduration=5.423749952 podStartE2EDuration="1m12.48232557s" podCreationTimestamp="2026-01-29 10:44:11 +0000 UTC" firstStartedPulling="2026-01-29 10:44:13.700934032 +0000 UTC m=+150.918265166" lastFinishedPulling="2026-01-29 10:45:20.75950965 +0000 UTC m=+217.976840784" observedRunningTime="2026-01-29 10:45:23.455605674 +0000 UTC m=+220.672936808" watchObservedRunningTime="2026-01-29 10:45:23.48232557 +0000 UTC m=+220.699656704" Jan 29 10:45:24 crc kubenswrapper[4852]: I0129 10:45:24.425358 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-84t6n" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="registry-server" containerID="cri-o://4da40245ff800aec0d7d2217df7a72cd7a6c44f21930b98dfa35555f88ca1027" gracePeriod=2 Jan 29 10:45:27 crc kubenswrapper[4852]: I0129 10:45:27.442447 4852 generic.go:334] "Generic (PLEG): container finished" podID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerID="4da40245ff800aec0d7d2217df7a72cd7a6c44f21930b98dfa35555f88ca1027" exitCode=0 Jan 29 10:45:27 crc kubenswrapper[4852]: I0129 10:45:27.442530 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-84t6n" event={"ID":"d433f87b-087a-47e4-93e2-0754097e5b1b","Type":"ContainerDied","Data":"4da40245ff800aec0d7d2217df7a72cd7a6c44f21930b98dfa35555f88ca1027"} Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.587391 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.689140 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpcpp\" (UniqueName: \"kubernetes.io/projected/d433f87b-087a-47e4-93e2-0754097e5b1b-kube-api-access-qpcpp\") pod \"d433f87b-087a-47e4-93e2-0754097e5b1b\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.689210 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-utilities\") pod \"d433f87b-087a-47e4-93e2-0754097e5b1b\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.689271 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-catalog-content\") pod \"d433f87b-087a-47e4-93e2-0754097e5b1b\" (UID: \"d433f87b-087a-47e4-93e2-0754097e5b1b\") " Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.690866 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-utilities" (OuterVolumeSpecName: "utilities") pod "d433f87b-087a-47e4-93e2-0754097e5b1b" (UID: "d433f87b-087a-47e4-93e2-0754097e5b1b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.699899 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d433f87b-087a-47e4-93e2-0754097e5b1b-kube-api-access-qpcpp" (OuterVolumeSpecName: "kube-api-access-qpcpp") pod "d433f87b-087a-47e4-93e2-0754097e5b1b" (UID: "d433f87b-087a-47e4-93e2-0754097e5b1b"). InnerVolumeSpecName "kube-api-access-qpcpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.790505 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpcpp\" (UniqueName: \"kubernetes.io/projected/d433f87b-087a-47e4-93e2-0754097e5b1b-kube-api-access-qpcpp\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:29 crc kubenswrapper[4852]: I0129 10:45:29.790537 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.016826 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.016905 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.016953 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.017538 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.017663 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25" gracePeriod=600 Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.460945 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-84t6n" event={"ID":"d433f87b-087a-47e4-93e2-0754097e5b1b","Type":"ContainerDied","Data":"1f1b9dca9b5a9a541af4d76129aa30d8299e983947eaa5441509fd784fd1c872"} Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.461004 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-84t6n" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.461027 4852 scope.go:117] "RemoveContainer" containerID="4da40245ff800aec0d7d2217df7a72cd7a6c44f21930b98dfa35555f88ca1027" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.616913 4852 scope.go:117] "RemoveContainer" containerID="8024d201ac960d59c21982f00efd17da80e51180c9dac89e5542f94788da5c88" Jan 29 10:45:30 crc kubenswrapper[4852]: I0129 10:45:30.630021 4852 scope.go:117] "RemoveContainer" containerID="4d8ccaa58f656f7d4d2b7c78b7707e0938b9e2750af5eec48143c9e68d262af8" Jan 29 10:45:31 crc kubenswrapper[4852]: I0129 10:45:31.470534 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25" exitCode=0 Jan 29 10:45:31 crc kubenswrapper[4852]: I0129 10:45:31.471964 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25"} Jan 29 10:45:31 crc kubenswrapper[4852]: I0129 10:45:31.874513 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:45:31 crc kubenswrapper[4852]: I0129 10:45:31.874873 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:45:31 crc kubenswrapper[4852]: I0129 10:45:31.915349 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.480522 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qmx6z" event={"ID":"d1329aa3-b350-44b9-959e-ce3730a07103","Type":"ContainerStarted","Data":"c6fddbff821a9ea1974de72a1401f4d3bc716ac0cba7a38af92bacbb59a09b0e"} Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.489657 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c75mb"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.501499 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fbk7g"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.518912 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-znvks"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.519243 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-znvks" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="registry-server" containerID="cri-o://5e29417c37b504fbeb44d187e5b1bb9bdbba87bc1562e9fb534cd03174a3bee7" gracePeriod=30 Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.549032 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b2ghs"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.549277 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" containerID="cri-o://094822b659fe8774a41c75d4bea31fcd35e0399b8a18671044178bb751e0c890" 
gracePeriod=30 Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.553845 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.555479 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qmx6z"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.557435 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wpbx8"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566071 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wqx8"] Jan 29 10:45:32 crc kubenswrapper[4852]: E0129 10:45:32.566324 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48672b8-4f28-46bd-9840-f37fddf2d95f" containerName="pruner" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566339 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48672b8-4f28-46bd-9840-f37fddf2d95f" containerName="pruner" Jan 29 10:45:32 crc kubenswrapper[4852]: E0129 10:45:32.566351 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="extract-content" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566360 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="extract-content" Jan 29 10:45:32 crc kubenswrapper[4852]: E0129 10:45:32.566370 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="extract-utilities" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566378 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="extract-utilities" Jan 29 10:45:32 crc kubenswrapper[4852]: E0129 10:45:32.566394 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="registry-server" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566401 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="registry-server" Jan 29 10:45:32 crc kubenswrapper[4852]: E0129 10:45:32.566415 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6996b7b-e91f-4806-875c-b579f9aa9211" containerName="collect-profiles" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566424 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6996b7b-e91f-4806-875c-b579f9aa9211" containerName="collect-profiles" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566539 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" containerName="registry-server" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566552 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a48672b8-4f28-46bd-9840-f37fddf2d95f" containerName="pruner" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.566561 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6996b7b-e91f-4806-875c-b579f9aa9211" containerName="collect-profiles" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.567021 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.569086 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n2zwn"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.569344 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-n2zwn" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="registry-server" containerID="cri-o://bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047" gracePeriod=30 Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.571564 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wqx8"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.574440 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7gmn"] Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.626311 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4326595e-3e2b-4ec6-b44f-9e8aa025849f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.626397 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4326595e-3e2b-4ec6-b44f-9e8aa025849f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.626425 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvgvx\" (UniqueName: \"kubernetes.io/projected/4326595e-3e2b-4ec6-b44f-9e8aa025849f-kube-api-access-nvgvx\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.727787 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4326595e-3e2b-4ec6-b44f-9e8aa025849f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.727896 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4326595e-3e2b-4ec6-b44f-9e8aa025849f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.727930 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvgvx\" (UniqueName: \"kubernetes.io/projected/4326595e-3e2b-4ec6-b44f-9e8aa025849f-kube-api-access-nvgvx\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: 
\"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.729073 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4326595e-3e2b-4ec6-b44f-9e8aa025849f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.735373 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4326595e-3e2b-4ec6-b44f-9e8aa025849f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.749907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvgvx\" (UniqueName: \"kubernetes.io/projected/4326595e-3e2b-4ec6-b44f-9e8aa025849f-kube-api-access-nvgvx\") pod \"marketplace-operator-79b997595-2wqx8\" (UID: \"4326595e-3e2b-4ec6-b44f-9e8aa025849f\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:32 crc kubenswrapper[4852]: I0129 10:45:32.881701 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:33 crc kubenswrapper[4852]: I0129 10:45:33.138423 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gz49m"] Jan 29 10:45:33 crc kubenswrapper[4852]: I0129 10:45:33.485981 4852 generic.go:334] "Generic (PLEG): container finished" podID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerID="094822b659fe8774a41c75d4bea31fcd35e0399b8a18671044178bb751e0c890" exitCode=0 Jan 29 10:45:33 crc kubenswrapper[4852]: I0129 10:45:33.486073 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" event={"ID":"4cde3fa3-1fde-45f6-891d-38f98485d443","Type":"ContainerDied","Data":"094822b659fe8774a41c75d4bea31fcd35e0399b8a18671044178bb751e0c890"} Jan 29 10:45:33 crc kubenswrapper[4852]: I0129 10:45:33.486426 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c75mb" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="registry-server" containerID="cri-o://822be194bd0a4257d4b810c074730b5ea53675a8d1fd1619efd1daa98c4320b5" gracePeriod=30 Jan 29 10:45:33 crc kubenswrapper[4852]: I0129 10:45:33.486474 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qmx6z" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="registry-server" containerID="cri-o://c6fddbff821a9ea1974de72a1401f4d3bc716ac0cba7a38af92bacbb59a09b0e" gracePeriod=30 Jan 29 10:45:33 crc kubenswrapper[4852]: I0129 10:45:33.511367 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qmx6z" podStartSLOduration=4.650369025 podStartE2EDuration="1m21.511342893s" podCreationTimestamp="2026-01-29 10:44:12 +0000 UTC" firstStartedPulling="2026-01-29 10:44:13.754783403 +0000 UTC m=+150.972114537" lastFinishedPulling="2026-01-29 10:45:30.615757271 
+0000 UTC m=+227.833088405" observedRunningTime="2026-01-29 10:45:33.506600134 +0000 UTC m=+230.723931268" watchObservedRunningTime="2026-01-29 10:45:33.511342893 +0000 UTC m=+230.728674037" Jan 29 10:45:34 crc kubenswrapper[4852]: E0129 10:45:34.448246 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047 is running failed: container process not found" containerID="bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047" cmd=["grpc_health_probe","-addr=:50051"] Jan 29 10:45:34 crc kubenswrapper[4852]: E0129 10:45:34.448551 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047 is running failed: container process not found" containerID="bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047" cmd=["grpc_health_probe","-addr=:50051"] Jan 29 10:45:34 crc kubenswrapper[4852]: E0129 10:45:34.448886 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047 is running failed: container process not found" containerID="bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047" cmd=["grpc_health_probe","-addr=:50051"] Jan 29 10:45:34 crc kubenswrapper[4852]: E0129 10:45:34.448916 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-n2zwn" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="registry-server" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.493576 4852 generic.go:334] "Generic (PLEG): container finished" podID="d1329aa3-b350-44b9-959e-ce3730a07103" containerID="c6fddbff821a9ea1974de72a1401f4d3bc716ac0cba7a38af92bacbb59a09b0e" exitCode=0 Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.493636 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qmx6z" event={"ID":"d1329aa3-b350-44b9-959e-ce3730a07103","Type":"ContainerDied","Data":"c6fddbff821a9ea1974de72a1401f4d3bc716ac0cba7a38af92bacbb59a09b0e"} Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.495744 4852 generic.go:334] "Generic (PLEG): container finished" podID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerID="5e29417c37b504fbeb44d187e5b1bb9bdbba87bc1562e9fb534cd03174a3bee7" exitCode=0 Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.495822 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerDied","Data":"5e29417c37b504fbeb44d187e5b1bb9bdbba87bc1562e9fb534cd03174a3bee7"} Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.497618 4852 generic.go:334] "Generic (PLEG): container finished" podID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerID="bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047" exitCode=0 Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.497692 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerDied","Data":"bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047"} Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.499241 4852 generic.go:334] "Generic (PLEG): container finished" podID="b545ab65-c670-46ea-9f48-518113c3387e" containerID="822be194bd0a4257d4b810c074730b5ea53675a8d1fd1619efd1daa98c4320b5" exitCode=0 Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.499266 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c75mb" event={"ID":"b545ab65-c670-46ea-9f48-518113c3387e","Type":"ContainerDied","Data":"822be194bd0a4257d4b810c074730b5ea53675a8d1fd1619efd1daa98c4320b5"} Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.699673 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c75mb"] Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.739041 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.752439 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.852864 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqc9l\" (UniqueName: \"kubernetes.io/projected/28b1ffef-b64a-4b00-a08f-60535f0fef60-kube-api-access-zqc9l\") pod \"28b1ffef-b64a-4b00-a08f-60535f0fef60\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.852923 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7ktx\" (UniqueName: \"kubernetes.io/projected/4cde3fa3-1fde-45f6-891d-38f98485d443-kube-api-access-n7ktx\") pod \"4cde3fa3-1fde-45f6-891d-38f98485d443\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.852970 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-operator-metrics\") pod \"4cde3fa3-1fde-45f6-891d-38f98485d443\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.852999 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-catalog-content\") pod \"28b1ffef-b64a-4b00-a08f-60535f0fef60\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.853042 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-trusted-ca\") pod \"4cde3fa3-1fde-45f6-891d-38f98485d443\" (UID: \"4cde3fa3-1fde-45f6-891d-38f98485d443\") " Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.853108 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-utilities\") pod \"28b1ffef-b64a-4b00-a08f-60535f0fef60\" (UID: \"28b1ffef-b64a-4b00-a08f-60535f0fef60\") " Jan 29 
10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.853948 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "4cde3fa3-1fde-45f6-891d-38f98485d443" (UID: "4cde3fa3-1fde-45f6-891d-38f98485d443"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.854796 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-utilities" (OuterVolumeSpecName: "utilities") pod "28b1ffef-b64a-4b00-a08f-60535f0fef60" (UID: "28b1ffef-b64a-4b00-a08f-60535f0fef60"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.875228 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b1ffef-b64a-4b00-a08f-60535f0fef60-kube-api-access-zqc9l" (OuterVolumeSpecName: "kube-api-access-zqc9l") pod "28b1ffef-b64a-4b00-a08f-60535f0fef60" (UID: "28b1ffef-b64a-4b00-a08f-60535f0fef60"). InnerVolumeSpecName "kube-api-access-zqc9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.875730 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cde3fa3-1fde-45f6-891d-38f98485d443-kube-api-access-n7ktx" (OuterVolumeSpecName: "kube-api-access-n7ktx") pod "4cde3fa3-1fde-45f6-891d-38f98485d443" (UID: "4cde3fa3-1fde-45f6-891d-38f98485d443"). InnerVolumeSpecName "kube-api-access-n7ktx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.877425 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "4cde3fa3-1fde-45f6-891d-38f98485d443" (UID: "4cde3fa3-1fde-45f6-891d-38f98485d443"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.925217 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28b1ffef-b64a-4b00-a08f-60535f0fef60" (UID: "28b1ffef-b64a-4b00-a08f-60535f0fef60"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.954063 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqc9l\" (UniqueName: \"kubernetes.io/projected/28b1ffef-b64a-4b00-a08f-60535f0fef60-kube-api-access-zqc9l\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.954107 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7ktx\" (UniqueName: \"kubernetes.io/projected/4cde3fa3-1fde-45f6-891d-38f98485d443-kube-api-access-n7ktx\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.954116 4852 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.954126 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.954134 4852 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cde3fa3-1fde-45f6-891d-38f98485d443-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:34 crc kubenswrapper[4852]: I0129 10:45:34.954141 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b1ffef-b64a-4b00-a08f-60535f0fef60-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.506527 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znvks" event={"ID":"28b1ffef-b64a-4b00-a08f-60535f0fef60","Type":"ContainerDied","Data":"bbff974d38cca45c6c88a04dad7cdda69f53baf63c754f10cda20d007e200d94"} Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.506576 4852 scope.go:117] "RemoveContainer" containerID="5e29417c37b504fbeb44d187e5b1bb9bdbba87bc1562e9fb534cd03174a3bee7" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.506611 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znvks" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.509062 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" event={"ID":"4cde3fa3-1fde-45f6-891d-38f98485d443","Type":"ContainerDied","Data":"9b2ec4e504a172577307d15166a84645e7d55e0817213d5b8e5306b2e2df571f"} Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.509255 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-b2ghs" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.532688 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-znvks"] Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.539702 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-znvks"] Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.545324 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b2ghs"] Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.551046 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b2ghs"] Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.970439 4852 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.970764 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d" gracePeriod=15 Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.970820 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b" gracePeriod=15 Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.970867 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5" gracePeriod=15 Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.970905 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa" gracePeriod=15 Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.970934 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809" gracePeriod=15 Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.973317 4852 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.975387 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="registry-server" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.975414 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="registry-server" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.975426 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.975436 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.975446 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.975456 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.975466 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="extract-utilities" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.975474 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="extract-utilities" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.975489 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.976489 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.976930 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.976942 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.976953 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="extract-content" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.976960 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="extract-content" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.976971 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.976979 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.978293 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978318 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.978337 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978347 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc 
kubenswrapper[4852]: E0129 10:45:35.978360 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978367 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: E0129 10:45:35.978377 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978384 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978519 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978535 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978544 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" containerName="marketplace-operator" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978554 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978562 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" containerName="registry-server" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978593 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978604 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978612 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.978621 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.996032 4852 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 10:45:35 crc kubenswrapper[4852]: I0129 10:45:35.996897 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.000832 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d433f87b-087a-47e4-93e2-0754097e5b1b" (UID: "d433f87b-087a-47e4-93e2-0754097e5b1b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:36 crc kubenswrapper[4852]: E0129 10:45:36.031770 4852 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.23:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.070875 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.070935 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.070976 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.071121 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.071241 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.071295 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.071387 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.071455 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.071567 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d433f87b-087a-47e4-93e2-0754097e5b1b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172685 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172744 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172822 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172829 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172850 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172849 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172895 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172921 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172959 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172956 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.172983 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.173014 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.173054 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.173071 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.173093 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.333248 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.515051 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.516431 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.517446 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809" exitCode=0 Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.517509 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5" exitCode=0 Jan 29 10:45:36 crc kubenswrapper[4852]: I0129 10:45:36.517535 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa" exitCode=2 Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.477718 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28b1ffef-b64a-4b00-a08f-60535f0fef60" path="/var/lib/kubelet/pods/28b1ffef-b64a-4b00-a08f-60535f0fef60/volumes" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.480365 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cde3fa3-1fde-45f6-891d-38f98485d443" path="/var/lib/kubelet/pods/4cde3fa3-1fde-45f6-891d-38f98485d443/volumes" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.526921 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.528638 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.529488 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b" exitCode=0 Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.534194 4852 generic.go:334] "Generic (PLEG): container finished" podID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" containerID="b5643a24c8bf0a8ae59caca4e7af8ba2763111ce1d0f361b994950335da9c8c6" exitCode=0 Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.534286 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44","Type":"ContainerDied","Data":"b5643a24c8bf0a8ae59caca4e7af8ba2763111ce1d0f361b994950335da9c8c6"} Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.846357 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.851861 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.854965 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.902742 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-catalog-content\") pod \"b545ab65-c670-46ea-9f48-518113c3387e\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.902833 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-utilities\") pod \"d1329aa3-b350-44b9-959e-ce3730a07103\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.903024 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gvrr\" (UniqueName: \"kubernetes.io/projected/d1329aa3-b350-44b9-959e-ce3730a07103-kube-api-access-9gvrr\") pod \"d1329aa3-b350-44b9-959e-ce3730a07103\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.903082 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-catalog-content\") pod \"d1329aa3-b350-44b9-959e-ce3730a07103\" (UID: \"d1329aa3-b350-44b9-959e-ce3730a07103\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.912564 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-utilities\") pod \"b545ab65-c670-46ea-9f48-518113c3387e\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.904820 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-utilities" (OuterVolumeSpecName: "utilities") pod "d1329aa3-b350-44b9-959e-ce3730a07103" (UID: "d1329aa3-b350-44b9-959e-ce3730a07103"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.911178 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1329aa3-b350-44b9-959e-ce3730a07103-kube-api-access-9gvrr" (OuterVolumeSpecName: "kube-api-access-9gvrr") pod "d1329aa3-b350-44b9-959e-ce3730a07103" (UID: "d1329aa3-b350-44b9-959e-ce3730a07103"). InnerVolumeSpecName "kube-api-access-9gvrr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.912683 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-utilities\") pod \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.912813 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8r4vs\" (UniqueName: \"kubernetes.io/projected/b545ab65-c670-46ea-9f48-518113c3387e-kube-api-access-8r4vs\") pod \"b545ab65-c670-46ea-9f48-518113c3387e\" (UID: \"b545ab65-c670-46ea-9f48-518113c3387e\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.912877 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-catalog-content\") pod \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.912980 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2z64v\" (UniqueName: \"kubernetes.io/projected/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-kube-api-access-2z64v\") pod \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\" (UID: \"9409abb2-eda9-4b1c-ab1b-4fcd886879a0\") " Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.913506 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.913532 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gvrr\" (UniqueName: \"kubernetes.io/projected/d1329aa3-b350-44b9-959e-ce3730a07103-kube-api-access-9gvrr\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.914506 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-utilities" (OuterVolumeSpecName: "utilities") pod "b545ab65-c670-46ea-9f48-518113c3387e" (UID: "b545ab65-c670-46ea-9f48-518113c3387e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.916773 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-utilities" (OuterVolumeSpecName: "utilities") pod "9409abb2-eda9-4b1c-ab1b-4fcd886879a0" (UID: "9409abb2-eda9-4b1c-ab1b-4fcd886879a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.919323 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-kube-api-access-2z64v" (OuterVolumeSpecName: "kube-api-access-2z64v") pod "9409abb2-eda9-4b1c-ab1b-4fcd886879a0" (UID: "9409abb2-eda9-4b1c-ab1b-4fcd886879a0"). InnerVolumeSpecName "kube-api-access-2z64v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.919368 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b545ab65-c670-46ea-9f48-518113c3387e-kube-api-access-8r4vs" (OuterVolumeSpecName: "kube-api-access-8r4vs") pod "b545ab65-c670-46ea-9f48-518113c3387e" (UID: "b545ab65-c670-46ea-9f48-518113c3387e"). InnerVolumeSpecName "kube-api-access-8r4vs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:37 crc kubenswrapper[4852]: I0129 10:45:37.947106 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d1329aa3-b350-44b9-959e-ce3730a07103" (UID: "d1329aa3-b350-44b9-959e-ce3730a07103"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.015437 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2z64v\" (UniqueName: \"kubernetes.io/projected/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-kube-api-access-2z64v\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.015816 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1329aa3-b350-44b9-959e-ce3730a07103-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.015992 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.016119 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.016235 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8r4vs\" (UniqueName: \"kubernetes.io/projected/b545ab65-c670-46ea-9f48-518113c3387e-kube-api-access-8r4vs\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.157188 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9409abb2-eda9-4b1c-ab1b-4fcd886879a0" (UID: "9409abb2-eda9-4b1c-ab1b-4fcd886879a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.218931 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9409abb2-eda9-4b1c-ab1b-4fcd886879a0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.426206 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b545ab65-c670-46ea-9f48-518113c3387e" (UID: "b545ab65-c670-46ea-9f48-518113c3387e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.523092 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b545ab65-c670-46ea-9f48-518113c3387e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.541250 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qmx6z" event={"ID":"d1329aa3-b350-44b9-959e-ce3730a07103","Type":"ContainerDied","Data":"133f8926f1c679d57de2edab5fb05d0ea5a299abc6871201ebd97601063ae11b"} Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.541394 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qmx6z" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.561090 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n2zwn" event={"ID":"9409abb2-eda9-4b1c-ab1b-4fcd886879a0","Type":"ContainerDied","Data":"c6fb2202cc654f910d94c6894918d0f193783e7aafd81e9f75c11ad72c0e3f94"} Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.561140 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n2zwn" Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.564505 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c75mb" event={"ID":"b545ab65-c670-46ea-9f48-518113c3387e","Type":"ContainerDied","Data":"d9b84b4796f4b246b89b767d24e9f97592d3fe0005bfd3dde6f98cb9767996d7"} Jan 29 10:45:38 crc kubenswrapper[4852]: I0129 10:45:38.564720 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c75mb" Jan 29 10:45:39 crc kubenswrapper[4852]: I0129 10:45:39.574531 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:45:39 crc kubenswrapper[4852]: I0129 10:45:39.577686 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 10:45:39 crc kubenswrapper[4852]: I0129 10:45:39.579476 4852 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d" exitCode=0 Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.854857 4852 scope.go:117] "RemoveContainer" containerID="787da8a511f35cdd8e56036cebdf5d2449d905221b782e48fe866c4ba51f18de" Jan 29 10:45:40 crc kubenswrapper[4852]: E0129 10:45:40.855439 4852 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-operators-p7gmn.188f2dcde18215a0 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-operators-p7gmn,UID:92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7,APIVersion:v1,ResourceVersion:28579,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 20.471s (20.471s including waiting). Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 10:45:40.854838688 +0000 UTC m=+238.072169822,LastTimestamp:2026-01-29 10:45:40.854838688 +0000 UTC m=+238.072169822,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.922258 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.957022 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kube-api-access\") pod \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.957069 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-var-lock\") pod \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.957093 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kubelet-dir\") pod \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\" (UID: \"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44\") " Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.957425 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" (UID: "37f90abf-9c0f-402f-9adf-2b9d4eeb0b44"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.958073 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-var-lock" (OuterVolumeSpecName: "var-lock") pod "37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" (UID: "37f90abf-9c0f-402f-9adf-2b9d4eeb0b44"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:40 crc kubenswrapper[4852]: I0129 10:45:40.963493 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" (UID: "37f90abf-9c0f-402f-9adf-2b9d4eeb0b44"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.007360 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.007709 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.007890 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.008059 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.008669 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.008854 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.058674 4852 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.058700 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.058710 4852 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37f90abf-9c0f-402f-9adf-2b9d4eeb0b44-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.069691 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.070799 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.071659 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.071980 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.072185 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.072472 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.072971 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.073308 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.073753 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: E0129 10:45:41.082265 4852 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-operators-p7gmn.188f2dcde18215a0 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-operators-p7gmn,UID:92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7,APIVersion:v1,ResourceVersion:28579,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 20.471s (20.471s including waiting). 
Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 10:45:40.854838688 +0000 UTC m=+238.072169822,LastTimestamp:2026-01-29 10:45:40.854838688 +0000 UTC m=+238.072169822,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.134793 4852 scope.go:117] "RemoveContainer" containerID="eeca353be9446424ca300a3698d7eb6471b7c5a63200ea1ee0ee9e921e3e8e01" Jan 29 10:45:41 crc kubenswrapper[4852]: W0129 10:45:41.158023 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-8c1b9cdd4b1cd9b578e55d1100731e736727bf0e76714c9c90eb51ba336e4ed0 WatchSource:0}: Error finding container 8c1b9cdd4b1cd9b578e55d1100731e736727bf0e76714c9c90eb51ba336e4ed0: Status 404 returned error can't find the container with id 8c1b9cdd4b1cd9b578e55d1100731e736727bf0e76714c9c90eb51ba336e4ed0 Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.159384 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.159443 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.159490 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.159493 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.159553 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.159826 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.160099 4852 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.160126 4852 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.162639 4852 scope.go:117] "RemoveContainer" containerID="094822b659fe8774a41c75d4bea31fcd35e0399b8a18671044178bb751e0c890" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.222272 4852 scope.go:117] "RemoveContainer" containerID="5403a4406a23ba7f3db23eef916931c75c691fb99555f75d6ef24845bb6b246e" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.263081 4852 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.273920 4852 scope.go:117] "RemoveContainer" containerID="c6fddbff821a9ea1974de72a1401f4d3bc716ac0cba7a38af92bacbb59a09b0e" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.288567 4852 scope.go:117] "RemoveContainer" containerID="63065226604e71ec27ed2ba96dfdd95d9755e0b6eacf00db0bb69aea99f75a5f" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.305318 4852 scope.go:117] "RemoveContainer" containerID="a6094befa4d7db6a987015d0e69a94a7a6cd1aa62cedbfb9f2a4a97791ec265f" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.320769 4852 scope.go:117] "RemoveContainer" containerID="bd3a94bd832e7cf9b486cce606343b8f0abd0b449297940690423b6c01b1d047" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.347457 4852 scope.go:117] "RemoveContainer" containerID="a9a00da4276d87b03e729bc6aece7515facd8588f3e943d4943cf016f740dc0d" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.364357 4852 scope.go:117] "RemoveContainer" containerID="1c8f9bd05a3c416282e731a485924ad30dbc5fae7e76e8ae7f9f086fbc159852" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.382969 4852 scope.go:117] "RemoveContainer" containerID="822be194bd0a4257d4b810c074730b5ea53675a8d1fd1619efd1daa98c4320b5" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.423707 4852 scope.go:117] "RemoveContainer" containerID="b465866d7ec5630c69479b9f172bd31060bcf00b547b02b0eaa064282c88e503" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.443337 4852 scope.go:117] "RemoveContainer" containerID="3647c0e5f5583d66b7bb5e860b70b8d58f9e4be42de2882a3c117e6b720ecf58" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.475512 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 29 10:45:41 crc kubenswrapper[4852]: E0129 10:45:41.498720 4852 log.go:32] "RunPodSandbox from runtime service failed" err=< Jan 29 10:45:41 crc kubenswrapper[4852]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network "multus-cni-network": 
plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e" Netns:"/var/run/netns/0747a503-a7a2-49b8-9eac-12a320e1f3b4" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:45:41 crc kubenswrapper[4852]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 29 10:45:41 crc kubenswrapper[4852]: > Jan 29 10:45:41 crc kubenswrapper[4852]: E0129 10:45:41.498782 4852 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 29 10:45:41 crc kubenswrapper[4852]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e" Netns:"/var/run/netns/0747a503-a7a2-49b8-9eac-12a320e1f3b4" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:45:41 crc kubenswrapper[4852]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 29 10:45:41 crc kubenswrapper[4852]: > pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:41 crc kubenswrapper[4852]: E0129 10:45:41.498801 4852 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Jan 29 10:45:41 crc kubenswrapper[4852]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e" Netns:"/var/run/netns/0747a503-a7a2-49b8-9eac-12a320e1f3b4" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:45:41 crc kubenswrapper[4852]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 29 10:45:41 crc kubenswrapper[4852]: > pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:41 crc kubenswrapper[4852]: E0129 10:45:41.498851 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network \\\"multus-cni-network\\\": plugin 
type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e\\\" Netns:\\\"/var/run/netns/0747a503-a7a2-49b8-9eac-12a320e1f3b4\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=744f8037f5ff6ce29773d53b6a1a4314bc044081235629e3efb246569d9e773e;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s\\\": dial tcp 38.102.83.23:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" podUID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.595382 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerStarted","Data":"31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f"} Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.599171 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerStarted","Data":"fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0"} Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.604237 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerStarted","Data":"5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c"} Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.613175 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.613777 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.613787 4852 scope.go:117] "RemoveContainer" containerID="338aac3a57bc0aad007a6c46e234ca97cdfc9837700969235d0c619974496809" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.614451 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.614941 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.615650 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.616317 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.616883 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.617203 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.617549 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.617906 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.618235 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" 
pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.618495 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.618890 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.619151 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.621679 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"f3e984f53a601ed918e6d663c7eafe23551b0d7d2b79b683f5bc8295ed6d785d"} Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.625561 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.625699 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37f90abf-9c0f-402f-9adf-2b9d4eeb0b44","Type":"ContainerDied","Data":"579053b2158b9c7655ec974e140106ebb39a254597e5be3fedc19dc6a2f8680d"} Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.625742 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="579053b2158b9c7655ec974e140106ebb39a254597e5be3fedc19dc6a2f8680d" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.629681 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"8c1b9cdd4b1cd9b578e55d1100731e736727bf0e76714c9c90eb51ba336e4ed0"} Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.631006 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.631262 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.631396 4852 scope.go:117] "RemoveContainer" containerID="9624f1c255c86128fefff070434c723bc611d5eedb2143a00534b0a30664ec4b" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.631553 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.631765 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.632349 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.632534 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.634973 4852 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.635359 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.657668 4852 scope.go:117] "RemoveContainer" containerID="f5534275da7819065a31a88af45f7d83f6a6ecf88f388db9a11ab516210a53a5" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.677814 4852 scope.go:117] "RemoveContainer" containerID="680edc43e1a4e7412825a3a60cbaaa1c3086a991801ec89cc8621b8e41ab7caa" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.695955 4852 scope.go:117] "RemoveContainer" containerID="01a93381741e9d99574f806ea3874ad1e79bcd1b86c03de0d0ff902d4601f34d" Jan 29 10:45:41 crc kubenswrapper[4852]: I0129 10:45:41.766425 4852 scope.go:117] "RemoveContainer" containerID="73dea74ba82935ece755f72d97912364580664a834af22c6698d5f00df526726" Jan 29 10:45:42 crc kubenswrapper[4852]: E0129 10:45:42.084933 4852 log.go:32] "RunPodSandbox from runtime service failed" err=< Jan 29 10:45:42 crc kubenswrapper[4852]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3" Netns:"/var/run/netns/1d0cfafa-e44f-4857-9e43-0902bd40352e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:45:42 crc kubenswrapper[4852]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 29 10:45:42 crc kubenswrapper[4852]: > Jan 29 10:45:42 crc kubenswrapper[4852]: E0129 10:45:42.085226 4852 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 29 10:45:42 crc kubenswrapper[4852]: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3" Netns:"/var/run/netns/1d0cfafa-e44f-4857-9e43-0902bd40352e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:45:42 crc kubenswrapper[4852]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 29 10:45:42 crc kubenswrapper[4852]: > pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:42 crc kubenswrapper[4852]: E0129 10:45:42.085252 4852 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Jan 29 10:45:42 crc kubenswrapper[4852]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3" Netns:"/var/run/netns/1d0cfafa-e44f-4857-9e43-0902bd40352e" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of 
cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s": dial tcp 38.102.83.23:6443: connect: connection refused Jan 29 10:45:42 crc kubenswrapper[4852]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 29 10:45:42 crc kubenswrapper[4852]: > pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:42 crc kubenswrapper[4852]: E0129 10:45:42.085316 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-2wqx8_openshift-marketplace_4326595e-3e2b-4ec6-b44f-9e8aa025849f_0(455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3): error adding pod openshift-marketplace_marketplace-operator-79b997595-2wqx8 to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3\\\" Netns:\\\"/var/run/netns/1d0cfafa-e44f-4857-9e43-0902bd40352e\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-2wqx8;K8S_POD_INFRA_CONTAINER_ID=455ef789298cf6820a5fcf703f7113b7bfde6813cc3e75b313970f0838fa7de3;K8S_POD_UID=4326595e-3e2b-4ec6-b44f-9e8aa025849f\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-2wqx8] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-2wqx8/4326595e-3e2b-4ec6-b44f-9e8aa025849f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-2wqx8 in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-2wqx8?timeout=1m0s\\\": dial tcp 38.102.83.23:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" podUID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.643305 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6"} Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.643737 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p7gmn" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="registry-server" containerID="cri-o://5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c" gracePeriod=30 Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.643865 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wpbx8" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="registry-server" containerID="cri-o://31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f" gracePeriod=30 Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.643895 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fbk7g" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="registry-server" containerID="cri-o://fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0" gracePeriod=30 Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.644184 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.644351 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.644489 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.644660 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: E0129 10:45:42.644756 4852 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.23:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.644881 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.645107 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.645360 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.645827 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.646016 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.646204 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.646375 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.646564 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.646763 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.646937 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.647105 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.647271 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.647488 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.973363 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-wpbx8_eb3421a0-7b6e-40b6-9f51-df284594f711/registry-server/0.log" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.974303 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.974820 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.975310 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.975851 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.976232 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.976524 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" 
pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.976791 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.977063 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.977342 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.977556 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:42 crc kubenswrapper[4852]: I0129 10:45:42.977842 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.083648 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-utilities\") pod \"eb3421a0-7b6e-40b6-9f51-df284594f711\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.083712 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5h8ng\" (UniqueName: \"kubernetes.io/projected/eb3421a0-7b6e-40b6-9f51-df284594f711-kube-api-access-5h8ng\") pod \"eb3421a0-7b6e-40b6-9f51-df284594f711\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.083801 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-catalog-content\") pod \"eb3421a0-7b6e-40b6-9f51-df284594f711\" (UID: \"eb3421a0-7b6e-40b6-9f51-df284594f711\") " Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.085377 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-utilities" (OuterVolumeSpecName: "utilities") pod 
"eb3421a0-7b6e-40b6-9f51-df284594f711" (UID: "eb3421a0-7b6e-40b6-9f51-df284594f711"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.092701 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb3421a0-7b6e-40b6-9f51-df284594f711-kube-api-access-5h8ng" (OuterVolumeSpecName: "kube-api-access-5h8ng") pod "eb3421a0-7b6e-40b6-9f51-df284594f711" (UID: "eb3421a0-7b6e-40b6-9f51-df284594f711"). InnerVolumeSpecName "kube-api-access-5h8ng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.106325 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb3421a0-7b6e-40b6-9f51-df284594f711" (UID: "eb3421a0-7b6e-40b6-9f51-df284594f711"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.184713 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.184749 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3421a0-7b6e-40b6-9f51-df284594f711-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.184759 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5h8ng\" (UniqueName: \"kubernetes.io/projected/eb3421a0-7b6e-40b6-9f51-df284594f711-kube-api-access-5h8ng\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.297863 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.298545 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.299172 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.299668 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.300015 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.300329 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.300601 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.300874 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.301187 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.301447 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.301893 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.387140 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m2dn\" (UniqueName: \"kubernetes.io/projected/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-kube-api-access-4m2dn\") pod \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.387198 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-utilities\") pod \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.387307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-catalog-content\") pod \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\" (UID: \"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9\") " Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.389459 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-utilities" (OuterVolumeSpecName: "utilities") pod "4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" (UID: "4ce2c002-428f-4b74-b25a-b9e4c9ed11d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.393853 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-kube-api-access-4m2dn" (OuterVolumeSpecName: "kube-api-access-4m2dn") pod "4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" (UID: "4ce2c002-428f-4b74-b25a-b9e4c9ed11d9"). InnerVolumeSpecName "kube-api-access-4m2dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.427901 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" (UID: "4ce2c002-428f-4b74-b25a-b9e4c9ed11d9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.474814 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.475372 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.475854 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.478149 4852 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.478774 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.479196 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.479684 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.480330 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.480876 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.481185 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.488477 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.488508 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m2dn\" (UniqueName: \"kubernetes.io/projected/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-kube-api-access-4m2dn\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.488524 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.617155 4852 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.617743 4852 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.617968 4852 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.618285 4852 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.618736 4852 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.618767 4852 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.619059 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="200ms" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.652914 4852 generic.go:334] "Generic (PLEG): container finished" podID="eb3421a0-7b6e-40b6-9f51-df284594f711" 
containerID="31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f" exitCode=1 Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.652970 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerDied","Data":"31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f"} Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.652991 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wpbx8" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.653235 4852 scope.go:117] "RemoveContainer" containerID="31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.653219 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wpbx8" event={"ID":"eb3421a0-7b6e-40b6-9f51-df284594f711","Type":"ContainerDied","Data":"ca088a1bb61ef2716eb58d8e0c3c43c17137cc35b5ba1273370e2d728885462d"} Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.654187 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.656493 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.656862 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.657195 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.657512 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.657874 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.658389 4852 
status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.658768 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.659301 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.661747 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerID="fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0" exitCode=0 Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.661807 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerDied","Data":"fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0"} Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.661855 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbk7g" event={"ID":"4ce2c002-428f-4b74-b25a-b9e4c9ed11d9","Type":"ContainerDied","Data":"8b075aa92e5a4b6e31f9702e5285dcf20a48849d5df65e54651f6b98c5dcfb68"} Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.661872 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.662446 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.662496 4852 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.23:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.662724 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 
38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.663010 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fbk7g" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.663061 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.663387 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.669105 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.669466 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.669845 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.670150 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.670500 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.670794 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.671177 4852 status_manager.go:851] "Failed to get status for pod" 
podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.671479 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.671793 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.671825 4852 scope.go:117] "RemoveContainer" containerID="bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.672358 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.672690 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.673012 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.673381 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.740774 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.741332 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 
38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.741667 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.741979 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.742227 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.742449 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.742696 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.742943 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.743149 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.753139 4852 scope.go:117] "RemoveContainer" containerID="00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.771178 4852 scope.go:117] "RemoveContainer" containerID="31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.771761 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f\": container with ID starting with 31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f not found: ID 
does not exist" containerID="31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.771802 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f"} err="failed to get container status \"31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f\": rpc error: code = NotFound desc = could not find container \"31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f\": container with ID starting with 31fe9866bd40165f3b0c9c102e1a5051c9ed43732f6a265bd89a934ae31d721f not found: ID does not exist" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.771829 4852 scope.go:117] "RemoveContainer" containerID="bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.772275 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029\": container with ID starting with bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029 not found: ID does not exist" containerID="bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.772326 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029"} err="failed to get container status \"bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029\": rpc error: code = NotFound desc = could not find container \"bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029\": container with ID starting with bf930349d40df93a979f15a3ebc073835fc17d995189fc55674c109de9b8c029 not found: ID does not exist" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.772367 4852 scope.go:117] "RemoveContainer" containerID="00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.772732 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b\": container with ID starting with 00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b not found: ID does not exist" containerID="00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.772766 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b"} err="failed to get container status \"00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b\": rpc error: code = NotFound desc = could not find container \"00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b\": container with ID starting with 00dfb35393e3fc4051ae7fcabf5185fff5417650e467afbb682408ddd18e763b not found: ID does not exist" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.772785 4852 scope.go:117] "RemoveContainer" containerID="fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.787856 4852 scope.go:117] "RemoveContainer" containerID="1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82" Jan 29 10:45:43 crc 
kubenswrapper[4852]: I0129 10:45:43.806916 4852 scope.go:117] "RemoveContainer" containerID="a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.820117 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="400ms" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.822937 4852 scope.go:117] "RemoveContainer" containerID="fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.823316 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0\": container with ID starting with fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0 not found: ID does not exist" containerID="fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.823350 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0"} err="failed to get container status \"fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0\": rpc error: code = NotFound desc = could not find container \"fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0\": container with ID starting with fdfbb38bd5003946e74657118a1f03a7aed2b61858c010dfb90e04be2875b9a0 not found: ID does not exist" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.823375 4852 scope.go:117] "RemoveContainer" containerID="1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.823853 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82\": container with ID starting with 1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82 not found: ID does not exist" containerID="1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.823877 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82"} err="failed to get container status \"1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82\": rpc error: code = NotFound desc = could not find container \"1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82\": container with ID starting with 1dbc594ebf191ec1587acf8ff1856a6d2651fba6a632efbd8353baa1966d3e82 not found: ID does not exist" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.823895 4852 scope.go:117] "RemoveContainer" containerID="a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972" Jan 29 10:45:43 crc kubenswrapper[4852]: E0129 10:45:43.824234 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972\": container with ID starting with a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972 not found: ID does not exist" 
containerID="a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972" Jan 29 10:45:43 crc kubenswrapper[4852]: I0129 10:45:43.824291 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972"} err="failed to get container status \"a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972\": rpc error: code = NotFound desc = could not find container \"a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972\": container with ID starting with a42e62929017c63ee5da0011a2a22d83f71b251b9855f01937cd5f0749f72972 not found: ID does not exist" Jan 29 10:45:44 crc kubenswrapper[4852]: E0129 10:45:44.221292 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="800ms" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.637696 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-p7gmn_92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7/registry-server/0.log" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.638356 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.638865 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.639142 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.639481 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.640177 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.640424 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.640807 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" 
pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.641212 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.641453 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.641769 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.673926 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-p7gmn_92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7/registry-server/0.log" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.674954 4852 generic.go:334] "Generic (PLEG): container finished" podID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerID="5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c" exitCode=1 Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.674993 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p7gmn" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.675013 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerDied","Data":"5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c"} Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.675062 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7gmn" event={"ID":"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7","Type":"ContainerDied","Data":"28ab38bf66df497c688df67390c098da56d80abf0629626b193443cde6116dea"} Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.675090 4852 scope.go:117] "RemoveContainer" containerID="5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.675931 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.676363 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.676846 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.677191 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.677467 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.677939 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.678496 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.678992 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.679425 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.694312 4852 scope.go:117] "RemoveContainer" containerID="e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.702902 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-utilities\") pod \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.703053 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcnzx\" (UniqueName: \"kubernetes.io/projected/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-kube-api-access-kcnzx\") pod \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.703236 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-catalog-content\") pod \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\" (UID: \"92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7\") " Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.703762 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-utilities" (OuterVolumeSpecName: "utilities") pod "92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" (UID: "92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.709882 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-kube-api-access-kcnzx" (OuterVolumeSpecName: "kube-api-access-kcnzx") pod "92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" (UID: "92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7"). InnerVolumeSpecName "kube-api-access-kcnzx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.718419 4852 scope.go:117] "RemoveContainer" containerID="dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.756906 4852 scope.go:117] "RemoveContainer" containerID="5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c" Jan 29 10:45:44 crc kubenswrapper[4852]: E0129 10:45:44.759008 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c\": container with ID starting with 5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c not found: ID does not exist" containerID="5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.759042 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c"} err="failed to get container status \"5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c\": rpc error: code = NotFound desc = could not find container \"5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c\": container with ID starting with 5622f6e2bf51f21614df988999e4662949bd719eba61edb7a74bc876ff6c078c not found: ID does not exist" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.759062 4852 scope.go:117] "RemoveContainer" containerID="e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009" Jan 29 10:45:44 crc kubenswrapper[4852]: E0129 10:45:44.759454 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009\": container with ID starting with e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009 not found: ID does not exist" containerID="e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.759481 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009"} err="failed to get container status \"e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009\": rpc error: code = NotFound desc = could not find container \"e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009\": container with ID starting with e9eef57cd732c76f18f19ecda63260809a95dee7ad9ce18949f5cb631966e009 not found: ID does not exist" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.759493 4852 scope.go:117] "RemoveContainer" containerID="dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa" Jan 29 10:45:44 crc kubenswrapper[4852]: E0129 10:45:44.759844 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa\": container with ID starting with dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa not found: ID does not exist" containerID="dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.759887 4852 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa"} err="failed to get container status \"dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa\": rpc error: code = NotFound desc = could not find container \"dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa\": container with ID starting with dcdf1314618555fcf8dfd0270e652a5b5a3fb088e435db8bc320ae8a8d70dfaa not found: ID does not exist" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.804462 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcnzx\" (UniqueName: \"kubernetes.io/projected/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-kube-api-access-kcnzx\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.804833 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.877692 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" (UID: "92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.906475 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.995666 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.996242 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.996758 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.997243 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.997681 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.998022 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.998447 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.998843 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:44 crc kubenswrapper[4852]: I0129 10:45:44.999294 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:45 crc kubenswrapper[4852]: E0129 10:45:45.023955 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="1.6s" Jan 29 10:45:46 crc kubenswrapper[4852]: E0129 10:45:46.624799 4852 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="3.2s" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.462634 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.463438 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.463766 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.463991 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.464254 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.464528 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.464766 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.465022 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.465299 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.465547 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.485267 4852 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.485306 4852 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:49 crc kubenswrapper[4852]: E0129 10:45:49.485811 4852 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.486465 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:49 crc kubenswrapper[4852]: W0129 10:45:49.517468 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-1e018aee81a81f9a80a45411d7c1c21f68801e1e258bd7a05db8c1a7c0be971f WatchSource:0}: Error finding container 1e018aee81a81f9a80a45411d7c1c21f68801e1e258bd7a05db8c1a7c0be971f: Status 404 returned error can't find the container with id 1e018aee81a81f9a80a45411d7c1c21f68801e1e258bd7a05db8c1a7c0be971f Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.704504 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.704806 4852 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae" exitCode=1 Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.704903 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae"} Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.705717 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1e018aee81a81f9a80a45411d7c1c21f68801e1e258bd7a05db8c1a7c0be971f"} Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.705746 4852 scope.go:117] "RemoveContainer" containerID="021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.705962 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.706490 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.706953 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.707178 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.707490 4852 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.707901 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.708261 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.708682 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.709081 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: I0129 10:45:49.709495 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:49 crc kubenswrapper[4852]: E0129 10:45:49.826416 4852 controller.go:145] "Failed to ensure lease exists, will 
retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="6.4s" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.533972 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.715912 4852 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="ade2b75dd97efdd5510dbbc9c94cd7128e0180a29a8887685f29c4bb0e83e712" exitCode=0 Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.715991 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"ade2b75dd97efdd5510dbbc9c94cd7128e0180a29a8887685f29c4bb0e83e712"} Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.717041 4852 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.717261 4852 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.717806 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: E0129 10:45:50.718198 4852 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.718277 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.718983 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.719445 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.719999 4852 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.720826 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.722208 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.723246 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.723409 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.723449 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"19602dd5c212b30e348d320d66e4d8b4d022bf0ab776f245fbc9e7f2e86b4a7d"} Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.723923 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.724316 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.725016 4852 status_manager.go:851] "Failed to get status for pod" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-zdz6d\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.725499 4852 status_manager.go:851] "Failed to get status for pod" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" pod="openshift-marketplace/community-operators-84t6n" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-84t6n\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.725962 4852 status_manager.go:851] "Failed to get status for pod" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" pod="openshift-marketplace/redhat-marketplace-wpbx8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wpbx8\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.726451 4852 status_manager.go:851] "Failed to get status for pod" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.726920 4852 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.727324 4852 status_manager.go:851] "Failed to get status for pod" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" pod="openshift-marketplace/certified-operators-fbk7g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fbk7g\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.727723 4852 status_manager.go:851] "Failed to get status for pod" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" pod="openshift-marketplace/redhat-operators-p7gmn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-p7gmn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.728145 4852 status_manager.go:851] "Failed to get status for pod" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" pod="openshift-marketplace/redhat-operators-n2zwn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n2zwn\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.728785 4852 status_manager.go:851] "Failed to get status for pod" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" pod="openshift-marketplace/redhat-marketplace-qmx6z" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qmx6z\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:50 crc kubenswrapper[4852]: I0129 10:45:50.729330 4852 status_manager.go:851] "Failed to get status for pod" podUID="b545ab65-c670-46ea-9f48-518113c3387e" pod="openshift-marketplace/certified-operators-c75mb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-c75mb\": dial tcp 38.102.83.23:6443: connect: connection refused" Jan 29 10:45:51 crc kubenswrapper[4852]: E0129 10:45:51.083392 4852 event.go:368] "Unable to write event (may retry after sleeping)" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-operators-p7gmn.188f2dcde18215a0 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-operators-p7gmn,UID:92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7,APIVersion:v1,ResourceVersion:28579,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 20.471s (20.471s including waiting). Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 10:45:40.854838688 +0000 UTC m=+238.072169822,LastTimestamp:2026-01-29 10:45:40.854838688 +0000 UTC m=+238.072169822,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 10:45:51 crc kubenswrapper[4852]: I0129 10:45:51.731775 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"73275c50494cb01376401e739b294844addcf6ddf35a97e00daa4746d5944f06"} Jan 29 10:45:51 crc kubenswrapper[4852]: I0129 10:45:51.732123 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f7818ec75c6105520f37d21e9b84796745ae09aa9c69fe81dfc9e8f57e2decc7"} Jan 29 10:45:51 crc kubenswrapper[4852]: I0129 10:45:51.732139 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5a1c60d5857e339eed93cdd5ce95499523c26a807afaf312f0847dab14b3cb9a"} Jan 29 10:45:52 crc kubenswrapper[4852]: I0129 10:45:52.740417 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"471207bb68c596597372965887631cd4cbb91fcefbbd0ea262ee47bc6c49a556"} Jan 29 10:45:52 crc kubenswrapper[4852]: I0129 10:45:52.740840 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"883dc05780210a9b6ab6722a0625ea5498f5e7221f5e0b861c5b6a4bfb0a7f65"} Jan 29 10:45:52 crc kubenswrapper[4852]: I0129 10:45:52.740864 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:52 crc kubenswrapper[4852]: I0129 10:45:52.740751 4852 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:52 crc kubenswrapper[4852]: I0129 10:45:52.740888 4852 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:53 crc kubenswrapper[4852]: I0129 10:45:53.463299 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:53 crc kubenswrapper[4852]: I0129 10:45:53.463950 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:45:54 crc kubenswrapper[4852]: I0129 10:45:54.329011 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:45:54 crc kubenswrapper[4852]: I0129 10:45:54.329216 4852 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 29 10:45:54 crc kubenswrapper[4852]: I0129 10:45:54.329632 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 29 10:45:54 crc kubenswrapper[4852]: I0129 10:45:54.487408 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:54 crc kubenswrapper[4852]: I0129 10:45:54.487504 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:54 crc kubenswrapper[4852]: I0129 10:45:54.493173 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:57 crc kubenswrapper[4852]: I0129 10:45:57.752937 4852 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:57 crc kubenswrapper[4852]: I0129 10:45:57.954769 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b2afcbac-871c-489b-a481-1ecbf7a58fb9" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.179725 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" podUID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" containerName="oauth-openshift" containerID="cri-o://a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420" gracePeriod=15 Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.537155 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584695 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-error\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584752 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-service-ca\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584789 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-ocp-branding-template\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584810 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-login\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584844 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-cliconfig\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584868 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-trusted-ca-bundle\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584897 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-dir\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584915 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-idp-0-file-data\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584931 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-provider-selection\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: 
\"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584958 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-policies\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.584973 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-session\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.585014 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-serving-cert\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.585031 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsglv\" (UniqueName: \"kubernetes.io/projected/a56c210f-0186-49e4-b21c-bf46c22ab3dd-kube-api-access-zsglv\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.585068 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-router-certs\") pod \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\" (UID: \"a56c210f-0186-49e4-b21c-bf46c22ab3dd\") " Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.585129 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.585576 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586205 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586293 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586314 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586524 4852 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586541 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586553 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.586945 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.587058 4852 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a56c210f-0186-49e4-b21c-bf46c22ab3dd-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.590400 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a56c210f-0186-49e4-b21c-bf46c22ab3dd-kube-api-access-zsglv" (OuterVolumeSpecName: "kube-api-access-zsglv") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "kube-api-access-zsglv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.590835 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.591041 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.591411 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.591898 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.591973 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.592162 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.592973 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.593333 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a56c210f-0186-49e4-b21c-bf46c22ab3dd" (UID: "a56c210f-0186-49e4-b21c-bf46c22ab3dd"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688342 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688395 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688409 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688420 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688429 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688441 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688452 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688461 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsglv\" (UniqueName: \"kubernetes.io/projected/a56c210f-0186-49e4-b21c-bf46c22ab3dd-kube-api-access-zsglv\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.688471 4852 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a56c210f-0186-49e4-b21c-bf46c22ab3dd-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.782672 4852 generic.go:334] "Generic (PLEG): container finished" podID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" containerID="a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420" exitCode=0 Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.782714 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.782757 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" event={"ID":"a56c210f-0186-49e4-b21c-bf46c22ab3dd","Type":"ContainerDied","Data":"a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420"} Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.782789 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gz49m" event={"ID":"a56c210f-0186-49e4-b21c-bf46c22ab3dd","Type":"ContainerDied","Data":"5ce0246087f288a7d1fffde37b8f4fd74867b0ff32cd670471f7fb7f15186275"} Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.782808 4852 scope.go:117] "RemoveContainer" containerID="a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.784187 4852 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.784204 4852 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.784343 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" event={"ID":"4326595e-3e2b-4ec6-b44f-9e8aa025849f","Type":"ContainerStarted","Data":"2efdeb13ce3aaa4423edfc08cfa3a3ead3d87986dc0cb2897e99f8fa622e9181"} Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.789127 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b2afcbac-871c-489b-a481-1ecbf7a58fb9" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.790046 4852 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://5a1c60d5857e339eed93cdd5ce95499523c26a807afaf312f0847dab14b3cb9a" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.790067 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.810744 4852 scope.go:117] "RemoveContainer" containerID="a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420" Jan 29 10:45:58 crc kubenswrapper[4852]: E0129 10:45:58.811178 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420\": container with ID starting with a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420 not found: ID does not exist" containerID="a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420" Jan 29 10:45:58 crc kubenswrapper[4852]: I0129 10:45:58.811249 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420"} err="failed to get container status \"a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420\": rpc error: code = NotFound desc = could not find container \"a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420\": 
container with ID starting with a6e905121eb0a0ff6b412cadb70004ad7a97ccaa56a7a94c446d18f1b84c7420 not found: ID does not exist" Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.789818 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/0.log" Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.790167 4852 generic.go:334] "Generic (PLEG): container finished" podID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" containerID="79135b65bcb41ea0def7196d3a3e738542b25780efeca72316dda66563db7dee" exitCode=1 Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.790715 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" event={"ID":"4326595e-3e2b-4ec6-b44f-9e8aa025849f","Type":"ContainerDied","Data":"79135b65bcb41ea0def7196d3a3e738542b25780efeca72316dda66563db7dee"} Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.790768 4852 scope.go:117] "RemoveContainer" containerID="79135b65bcb41ea0def7196d3a3e738542b25780efeca72316dda66563db7dee" Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.792572 4852 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.792692 4852 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="ee5480f9-5e52-428b-9a1b-f49689d99ab4" Jan 29 10:45:59 crc kubenswrapper[4852]: I0129 10:45:59.809186 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b2afcbac-871c-489b-a481-1ecbf7a58fb9" Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.532969 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.799431 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/1.log" Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.800292 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/0.log" Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.800343 4852 generic.go:334] "Generic (PLEG): container finished" podID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" containerID="e10047f5d887041ab0cd8ae5e9bf8f36cd18a438c5c6cbf776f0c89e95f7bc5a" exitCode=1 Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.800373 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" event={"ID":"4326595e-3e2b-4ec6-b44f-9e8aa025849f","Type":"ContainerDied","Data":"e10047f5d887041ab0cd8ae5e9bf8f36cd18a438c5c6cbf776f0c89e95f7bc5a"} Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.800409 4852 scope.go:117] "RemoveContainer" containerID="79135b65bcb41ea0def7196d3a3e738542b25780efeca72316dda66563db7dee" Jan 29 10:46:00 crc kubenswrapper[4852]: I0129 10:46:00.800874 4852 scope.go:117] "RemoveContainer" containerID="e10047f5d887041ab0cd8ae5e9bf8f36cd18a438c5c6cbf776f0c89e95f7bc5a" Jan 29 10:46:00 crc kubenswrapper[4852]: E0129 
10:46:00.801064 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" podUID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" Jan 29 10:46:01 crc kubenswrapper[4852]: I0129 10:46:01.808935 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/1.log" Jan 29 10:46:01 crc kubenswrapper[4852]: I0129 10:46:01.813749 4852 scope.go:117] "RemoveContainer" containerID="e10047f5d887041ab0cd8ae5e9bf8f36cd18a438c5c6cbf776f0c89e95f7bc5a" Jan 29 10:46:01 crc kubenswrapper[4852]: E0129 10:46:01.814023 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" podUID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" Jan 29 10:46:02 crc kubenswrapper[4852]: I0129 10:46:02.882157 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:46:02 crc kubenswrapper[4852]: I0129 10:46:02.882227 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:46:02 crc kubenswrapper[4852]: I0129 10:46:02.882728 4852 scope.go:117] "RemoveContainer" containerID="e10047f5d887041ab0cd8ae5e9bf8f36cd18a438c5c6cbf776f0c89e95f7bc5a" Jan 29 10:46:02 crc kubenswrapper[4852]: E0129 10:46:02.882908 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-2wqx8_openshift-marketplace(4326595e-3e2b-4ec6-b44f-9e8aa025849f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" podUID="4326595e-3e2b-4ec6-b44f-9e8aa025849f" Jan 29 10:46:04 crc kubenswrapper[4852]: I0129 10:46:04.329091 4852 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 29 10:46:04 crc kubenswrapper[4852]: I0129 10:46:04.329152 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 29 10:46:07 crc kubenswrapper[4852]: I0129 10:46:07.580400 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 29 10:46:07 crc kubenswrapper[4852]: I0129 10:46:07.844446 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 29 
10:46:07 crc kubenswrapper[4852]: I0129 10:46:07.846124 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 29 10:46:08 crc kubenswrapper[4852]: I0129 10:46:08.101684 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 10:46:08 crc kubenswrapper[4852]: I0129 10:46:08.289825 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 10:46:08 crc kubenswrapper[4852]: I0129 10:46:08.743256 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 10:46:09 crc kubenswrapper[4852]: I0129 10:46:09.152248 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 10:46:09 crc kubenswrapper[4852]: I0129 10:46:09.410939 4852 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 10:46:09 crc kubenswrapper[4852]: I0129 10:46:09.441634 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 29 10:46:09 crc kubenswrapper[4852]: I0129 10:46:09.639551 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 10:46:09 crc kubenswrapper[4852]: I0129 10:46:09.847042 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 10:46:09 crc kubenswrapper[4852]: I0129 10:46:09.997666 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.063727 4852 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.395005 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.493127 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.497522 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.562630 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.691744 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.710045 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.723321 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.745798 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.750103 4852 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.795374 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.899202 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 29 10:46:10 crc kubenswrapper[4852]: I0129 10:46:10.985680 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.112872 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.214455 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.218291 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.248559 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.460432 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.488945 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.618370 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.754373 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.822275 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.836982 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.907959 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 29 10:46:11 crc kubenswrapper[4852]: I0129 10:46:11.918108 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.038301 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.053070 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.095935 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.098645 4852 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.107299 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.234842 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.269187 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.273749 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.275617 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.367870 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.410364 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.428764 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.433022 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.458903 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.475924 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.478151 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.499180 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.511508 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.709370 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.730359 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.754734 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.801927 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.836276 4852 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.836353 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.836466 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.849241 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 10:46:12 crc kubenswrapper[4852]: I0129 10:46:12.882104 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.018201 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.052317 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.064829 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.153787 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.163616 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.176716 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.191881 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.211084 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.328388 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.339365 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.360513 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.486472 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.528767 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.654774 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 
10:46:13.679956 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.688888 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.689192 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.773792 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.878045 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.939100 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.949273 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.957780 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 10:46:13 crc kubenswrapper[4852]: I0129 10:46:13.984082 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.153467 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.298447 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.329441 4852 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.329513 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.329569 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.330198 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"19602dd5c212b30e348d320d66e4d8b4d022bf0ab776f245fbc9e7f2e86b4a7d"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.330335 4852 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://19602dd5c212b30e348d320d66e4d8b4d022bf0ab776f245fbc9e7f2e86b4a7d" gracePeriod=30 Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.444285 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.474143 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.496295 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.497024 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.612155 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.618015 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.677264 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.755663 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.788315 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 10:46:14 crc kubenswrapper[4852]: I0129 10:46:14.849279 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.029044 4852 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.126093 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.138848 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.199295 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.207668 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.266114 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.317458 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.329087 4852 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.548129 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.674966 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.793503 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.906800 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.911327 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 10:46:15 crc kubenswrapper[4852]: I0129 10:46:15.959517 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.053549 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.057536 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.214068 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.251658 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.336543 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.406243 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.406243 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.474396 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.498059 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.502935 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.504961 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.521366 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.578316 4852 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.627834 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.682049 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.750992 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.861464 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.877562 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.987345 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 10:46:16 crc kubenswrapper[4852]: I0129 10:46:16.997905 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.041300 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.057732 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.067316 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.289140 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.316444 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.333280 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.411485 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.463261 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.711131 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.741175 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 10:46:17 crc kubenswrapper[4852]: I0129 10:46:17.908516 4852 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.029342 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.070229 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.075955 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.235599 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.323040 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.347807 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.348529 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.402490 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.462614 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.463024 4852 scope.go:117] "RemoveContainer" containerID="e10047f5d887041ab0cd8ae5e9bf8f36cd18a438c5c6cbf776f0c89e95f7bc5a" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.467273 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.628870 4852 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.633734 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p7gmn","openshift-marketplace/community-operators-84t6n","openshift-marketplace/redhat-marketplace-wpbx8","openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-gz49m","openshift-marketplace/certified-operators-fbk7g","openshift-marketplace/redhat-operators-n2zwn","openshift-marketplace/redhat-marketplace-qmx6z","openshift-marketplace/certified-operators-c75mb"] Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.633854 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.633874 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wqx8"] Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.638333 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.653877 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podStartSLOduration=21.653856345 podStartE2EDuration="21.653856345s" podCreationTimestamp="2026-01-29 10:45:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:46:18.650364789 +0000 UTC m=+275.867695933" watchObservedRunningTime="2026-01-29 10:46:18.653856345 +0000 UTC m=+275.871187489" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.666768 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.796213 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.833454 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.890040 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/1.log" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.890214 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" event={"ID":"4326595e-3e2b-4ec6-b44f-9e8aa025849f","Type":"ContainerStarted","Data":"6240b071fea14cf9e1acd3a3ed71edc9e80252a72833372fd09d564f175e1826"} Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.890489 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.893636 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.905236 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2wqx8" podStartSLOduration=46.905219545 podStartE2EDuration="46.905219545s" podCreationTimestamp="2026-01-29 10:45:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:46:18.903761749 +0000 UTC m=+276.121092883" watchObservedRunningTime="2026-01-29 10:46:18.905219545 +0000 UTC m=+276.122550679" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.945613 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 10:46:18 crc kubenswrapper[4852]: I0129 10:46:18.983461 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.080213 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.149899 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.178335 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.317918 4852 reflector.go:368] Caches 
populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.330532 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.371738 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.371787 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.403809 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.423704 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.470390 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" path="/var/lib/kubelet/pods/4ce2c002-428f-4b74-b25a-b9e4c9ed11d9/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.471033 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" path="/var/lib/kubelet/pods/92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.471618 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" path="/var/lib/kubelet/pods/9409abb2-eda9-4b1c-ab1b-4fcd886879a0/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.472214 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" path="/var/lib/kubelet/pods/a56c210f-0186-49e4-b21c-bf46c22ab3dd/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.472849 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b545ab65-c670-46ea-9f48-518113c3387e" path="/var/lib/kubelet/pods/b545ab65-c670-46ea-9f48-518113c3387e/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.473502 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" path="/var/lib/kubelet/pods/d1329aa3-b350-44b9-959e-ce3730a07103/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.474186 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d433f87b-087a-47e4-93e2-0754097e5b1b" path="/var/lib/kubelet/pods/d433f87b-087a-47e4-93e2-0754097e5b1b/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.475855 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" path="/var/lib/kubelet/pods/eb3421a0-7b6e-40b6-9f51-df284594f711/volumes" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.511980 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.543874 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.581078 4852 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.605265 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.646289 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.752860 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.841143 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 29 10:46:19 crc kubenswrapper[4852]: I0129 10:46:19.981178 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.066938 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.150982 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.188802 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.198680 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.201785 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.211007 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.211492 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.212374 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.262132 4852 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.262349 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6" gracePeriod=5 Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.477417 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.490265 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.519266 4852 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.519785 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.560046 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.647367 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.698189 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.735633 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.910685 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.948614 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 29 10:46:20 crc kubenswrapper[4852]: I0129 10:46:20.955797 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.017083 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.049124 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.501698 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.536603 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.747870 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.793810 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.873100 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 29 10:46:21 crc kubenswrapper[4852]: I0129 10:46:21.877674 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.014729 4852 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.023303 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.064798 4852 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.114172 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.144806 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.199949 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.237265 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.361283 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.405414 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.491483 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.574283 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.597054 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.689871 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.759213 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 29 10:46:22 crc kubenswrapper[4852]: I0129 10:46:22.989275 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.067674 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.220256 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.274483 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.346663 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370453 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt"] Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370739 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370756 4852 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370770 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370780 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370789 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370797 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370810 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370817 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370828 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370837 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370850 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" containerName="oauth-openshift" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370859 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" containerName="oauth-openshift" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370869 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370877 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370886 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370893 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370903 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370912 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370923 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370931 4852 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370943 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370951 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370964 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370971 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370982 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.370990 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.370998 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371006 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371016 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371023 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371035 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" containerName="installer" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371042 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" containerName="installer" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371054 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371062 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371077 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371087 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371096 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371195 4852 
state_mem.go:107] "Deleted CPUSet assignment" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="extract-content" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371214 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371222 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="extract-utilities" Jan 29 10:46:23 crc kubenswrapper[4852]: E0129 10:46:23.371233 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371241 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371354 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ce2c002-428f-4b74-b25a-b9e4c9ed11d9" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371369 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b545ab65-c670-46ea-9f48-518113c3387e" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371378 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9409abb2-eda9-4b1c-ab1b-4fcd886879a0" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371386 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1329aa3-b350-44b9-959e-ce3730a07103" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371399 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="92ae5b88-40b5-4eec-aeeb-19ec6b2ffaa7" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371409 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a56c210f-0186-49e4-b21c-bf46c22ab3dd" containerName="oauth-openshift" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371418 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb3421a0-7b6e-40b6-9f51-df284594f711" containerName="registry-server" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371430 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="37f90abf-9c0f-402f-9adf-2b9d4eeb0b44" containerName="installer" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371439 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.371902 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.374365 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.375821 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.375926 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.375998 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376089 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376160 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376219 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376244 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376743 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376786 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376803 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.376970 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.389183 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.392206 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt"] Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.402057 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.403430 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.444959 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483410 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483455 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-service-ca\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483480 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-error\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483500 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483562 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-login\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483614 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-session\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483636 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbntt\" (UniqueName: \"kubernetes.io/projected/4494e477-8fcb-42c3-8ace-9950e6103549-kube-api-access-qbntt\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483660 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 
10:46:23.483689 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-audit-policies\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483706 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483721 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4494e477-8fcb-42c3-8ace-9950e6103549-audit-dir\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483846 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-router-certs\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.483986 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.484036 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.584845 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4494e477-8fcb-42c3-8ace-9950e6103549-audit-dir\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.584930 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4494e477-8fcb-42c3-8ace-9950e6103549-audit-dir\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.585577 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-router-certs\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.585885 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.585937 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.585965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586018 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-service-ca\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586045 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-error\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586076 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586122 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-login\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586163 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-session\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586190 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbntt\" (UniqueName: \"kubernetes.io/projected/4494e477-8fcb-42c3-8ace-9950e6103549-kube-api-access-qbntt\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586236 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586304 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-audit-policies\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.586347 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.587264 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-audit-policies\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.587359 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.587520 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-service-ca\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.587875 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.591557 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-error\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.599348 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.599599 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.599890 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-session\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.600068 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-template-login\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.600430 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-router-certs\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.611125 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.611163 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" 
(UniqueName: \"kubernetes.io/secret/4494e477-8fcb-42c3-8ace-9950e6103549-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.615885 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbntt\" (UniqueName: \"kubernetes.io/projected/4494e477-8fcb-42c3-8ace-9950e6103549-kube-api-access-qbntt\") pod \"oauth-openshift-6cc7c68bbf-krrjt\" (UID: \"4494e477-8fcb-42c3-8ace-9950e6103549\") " pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.635602 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.698572 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.701288 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.906247 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt"] Jan 29 10:46:23 crc kubenswrapper[4852]: I0129 10:46:23.982166 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.098842 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.383571 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.495392 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.734844 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.921112 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" event={"ID":"4494e477-8fcb-42c3-8ace-9950e6103549","Type":"ContainerStarted","Data":"f8fcf812178ea4891a66cbff4058f9e349e58033952ec0bb66ebad89e015fc18"} Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.921454 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.921855 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" event={"ID":"4494e477-8fcb-42c3-8ace-9950e6103549","Type":"ContainerStarted","Data":"378fcfc3800f42921b16f6aa992d53829942430b3c2a2dae0520616c65cc11a8"} Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.926382 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" Jan 29 10:46:24 crc kubenswrapper[4852]: I0129 10:46:24.941181 4852 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6cc7c68bbf-krrjt" podStartSLOduration=51.941162795 podStartE2EDuration="51.941162795s" podCreationTimestamp="2026-01-29 10:45:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:46:24.941110923 +0000 UTC m=+282.158442067" watchObservedRunningTime="2026-01-29 10:46:24.941162795 +0000 UTC m=+282.158493919" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.112102 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.388988 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.389064 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517427 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517522 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517552 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517599 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517633 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517671 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517721 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517727 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517775 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.517990 4852 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.518004 4852 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.518012 4852 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.518021 4852 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.525104 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.619308 4852 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.928688 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.928756 4852 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6" exitCode=137 Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.928815 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.928838 4852 scope.go:117] "RemoveContainer" containerID="af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.979099 4852 scope.go:117] "RemoveContainer" containerID="af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6" Jan 29 10:46:25 crc kubenswrapper[4852]: E0129 10:46:25.979671 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6\": container with ID starting with af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6 not found: ID does not exist" containerID="af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6" Jan 29 10:46:25 crc kubenswrapper[4852]: I0129 10:46:25.979711 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6"} err="failed to get container status \"af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6\": rpc error: code = NotFound desc = could not find container \"af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6\": container with ID starting with af42e042d6e2a5f9f956cceaa5337006cae755e387adb15587f3e0bd7d77d0b6 not found: ID does not exist" Jan 29 10:46:27 crc kubenswrapper[4852]: I0129 10:46:27.471402 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 29 10:46:43 crc kubenswrapper[4852]: I0129 10:46:43.258326 4852 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 29 10:46:45 crc kubenswrapper[4852]: I0129 10:46:45.028259 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 29 10:46:45 crc kubenswrapper[4852]: I0129 10:46:45.031058 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 10:46:45 crc kubenswrapper[4852]: I0129 10:46:45.031118 4852 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" 
containerID="19602dd5c212b30e348d320d66e4d8b4d022bf0ab776f245fbc9e7f2e86b4a7d" exitCode=137 Jan 29 10:46:45 crc kubenswrapper[4852]: I0129 10:46:45.031154 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"19602dd5c212b30e348d320d66e4d8b4d022bf0ab776f245fbc9e7f2e86b4a7d"} Jan 29 10:46:45 crc kubenswrapper[4852]: I0129 10:46:45.031197 4852 scope.go:117] "RemoveContainer" containerID="021b972f3595d9d4335c01b203476e9902b702b86cbbf9c72a03466f2cc863ae" Jan 29 10:46:46 crc kubenswrapper[4852]: I0129 10:46:46.037343 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 29 10:46:46 crc kubenswrapper[4852]: I0129 10:46:46.039123 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8f531e9803bdcda5529e39540e806554932f34f4b7288537497222e9d2ecc357"} Jan 29 10:46:50 crc kubenswrapper[4852]: I0129 10:46:50.533292 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:46:54 crc kubenswrapper[4852]: I0129 10:46:54.328941 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:46:54 crc kubenswrapper[4852]: I0129 10:46:54.333252 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:46:55 crc kubenswrapper[4852]: I0129 10:46:55.078759 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 29 10:46:55 crc kubenswrapper[4852]: I0129 10:46:55.082435 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 10:46:56 crc kubenswrapper[4852]: I0129 10:46:56.984245 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h2lzr"] Jan 29 10:46:56 crc kubenswrapper[4852]: I0129 10:46:56.986449 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:56 crc kubenswrapper[4852]: I0129 10:46:56.989214 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 10:46:56 crc kubenswrapper[4852]: I0129 10:46:56.994630 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h2lzr"] Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.184491 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-catalog-content\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.184787 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5shx5\" (UniqueName: \"kubernetes.io/projected/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-kube-api-access-5shx5\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.184813 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-utilities\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.285947 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-utilities\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.286006 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5shx5\" (UniqueName: \"kubernetes.io/projected/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-kube-api-access-5shx5\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.286075 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-catalog-content\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.286505 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-utilities\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.286885 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-catalog-content\") pod \"community-operators-h2lzr\" (UID: 
\"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.310643 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5shx5\" (UniqueName: \"kubernetes.io/projected/42b49ee7-0bb4-43b0-ba65-2ec3c09c993c-kube-api-access-5shx5\") pod \"community-operators-h2lzr\" (UID: \"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c\") " pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.601065 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:46:57 crc kubenswrapper[4852]: I0129 10:46:57.776441 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h2lzr"] Jan 29 10:46:58 crc kubenswrapper[4852]: I0129 10:46:58.095188 4852 generic.go:334] "Generic (PLEG): container finished" podID="42b49ee7-0bb4-43b0-ba65-2ec3c09c993c" containerID="5171775d8f3e784ee3276c62ea0671b69ef214a46cc1d55fee4ed60d62ebf094" exitCode=0 Jan 29 10:46:58 crc kubenswrapper[4852]: I0129 10:46:58.095275 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2lzr" event={"ID":"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c","Type":"ContainerDied","Data":"5171775d8f3e784ee3276c62ea0671b69ef214a46cc1d55fee4ed60d62ebf094"} Jan 29 10:46:58 crc kubenswrapper[4852]: I0129 10:46:58.095452 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2lzr" event={"ID":"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c","Type":"ContainerStarted","Data":"a3a341ada33f332ff0a845f37421d635cb54d8a752d717fb649e75c906920e84"} Jan 29 10:46:59 crc kubenswrapper[4852]: I0129 10:46:59.102019 4852 generic.go:334] "Generic (PLEG): container finished" podID="42b49ee7-0bb4-43b0-ba65-2ec3c09c993c" containerID="23dcd3ce29474208b50e0127ef5356d0567f3d9aa9cdab8be7fe8735c2224148" exitCode=0 Jan 29 10:46:59 crc kubenswrapper[4852]: I0129 10:46:59.102076 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2lzr" event={"ID":"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c","Type":"ContainerDied","Data":"23dcd3ce29474208b50e0127ef5356d0567f3d9aa9cdab8be7fe8735c2224148"} Jan 29 10:47:00 crc kubenswrapper[4852]: I0129 10:47:00.108350 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2lzr" event={"ID":"42b49ee7-0bb4-43b0-ba65-2ec3c09c993c","Type":"ContainerStarted","Data":"808d7e2f47f3596a3ab5865d51c48ebce7b1f1fb8b48a695cbe4b5cb2beecc05"} Jan 29 10:47:00 crc kubenswrapper[4852]: I0129 10:47:00.133747 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h2lzr" podStartSLOduration=2.754423012 podStartE2EDuration="4.133729903s" podCreationTimestamp="2026-01-29 10:46:56 +0000 UTC" firstStartedPulling="2026-01-29 10:46:58.096545231 +0000 UTC m=+315.313876375" lastFinishedPulling="2026-01-29 10:46:59.475852132 +0000 UTC m=+316.693183266" observedRunningTime="2026-01-29 10:47:00.12955932 +0000 UTC m=+317.346890454" watchObservedRunningTime="2026-01-29 10:47:00.133729903 +0000 UTC m=+317.351061037" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.583553 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tn8zl"] Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.584781 4852 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.601739 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tn8zl"] Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.664752 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h7cn5"] Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.664949 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" podUID="43ee8959-7d50-4e2c-93d8-70de85191fc3" containerName="controller-manager" containerID="cri-o://f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d" gracePeriod=30 Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.737982 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l4vb\" (UniqueName: \"kubernetes.io/projected/d4141fa7-4395-49ab-a156-538f6aaa9093-kube-api-access-5l4vb\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.738333 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4141fa7-4395-49ab-a156-538f6aaa9093-utilities\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.738375 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4141fa7-4395-49ab-a156-538f6aaa9093-catalog-content\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.773165 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz"] Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.773393 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" podUID="a5ff2962-1c70-420c-9003-9339c14eca14" containerName="route-controller-manager" containerID="cri-o://82370cd0d3b2bc0cb12918cf0430e7c537f1834667d01367b707b0d4da8d2f98" gracePeriod=30 Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.839433 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4141fa7-4395-49ab-a156-538f6aaa9093-catalog-content\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.839539 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l4vb\" (UniqueName: \"kubernetes.io/projected/d4141fa7-4395-49ab-a156-538f6aaa9093-kube-api-access-5l4vb\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc 
kubenswrapper[4852]: I0129 10:47:01.839575 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4141fa7-4395-49ab-a156-538f6aaa9093-utilities\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.840190 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4141fa7-4395-49ab-a156-538f6aaa9093-utilities\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.840461 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4141fa7-4395-49ab-a156-538f6aaa9093-catalog-content\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.874544 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l4vb\" (UniqueName: \"kubernetes.io/projected/d4141fa7-4395-49ab-a156-538f6aaa9093-kube-api-access-5l4vb\") pod \"community-operators-tn8zl\" (UID: \"d4141fa7-4395-49ab-a156-538f6aaa9093\") " pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:01 crc kubenswrapper[4852]: I0129 10:47:01.903052 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.072352 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.131191 4852 generic.go:334] "Generic (PLEG): container finished" podID="a5ff2962-1c70-420c-9003-9339c14eca14" containerID="82370cd0d3b2bc0cb12918cf0430e7c537f1834667d01367b707b0d4da8d2f98" exitCode=0 Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.131262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" event={"ID":"a5ff2962-1c70-420c-9003-9339c14eca14","Type":"ContainerDied","Data":"82370cd0d3b2bc0cb12918cf0430e7c537f1834667d01367b707b0d4da8d2f98"} Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.140219 4852 generic.go:334] "Generic (PLEG): container finished" podID="43ee8959-7d50-4e2c-93d8-70de85191fc3" containerID="f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d" exitCode=0 Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.140266 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" event={"ID":"43ee8959-7d50-4e2c-93d8-70de85191fc3","Type":"ContainerDied","Data":"f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d"} Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.140297 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" event={"ID":"43ee8959-7d50-4e2c-93d8-70de85191fc3","Type":"ContainerDied","Data":"03eb5a9f30c906e8c42833f4bce58c2d21b0ed1b6df378ce910432feeaf12063"} Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.140319 4852 scope.go:117] "RemoveContainer" containerID="f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.140490 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h7cn5" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.174148 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.175911 4852 scope.go:117] "RemoveContainer" containerID="f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d" Jan 29 10:47:02 crc kubenswrapper[4852]: E0129 10:47:02.185777 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d\": container with ID starting with f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d not found: ID does not exist" containerID="f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.185843 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d"} err="failed to get container status \"f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d\": rpc error: code = NotFound desc = could not find container \"f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d\": container with ID starting with f6aaedfd1db11641aaa90d62079a27825346a66bddaf119ecf1a302756936a4d not found: ID does not exist" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.190705 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tn8zl"] Jan 29 10:47:02 crc kubenswrapper[4852]: W0129 10:47:02.190761 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4141fa7_4395_49ab_a156_538f6aaa9093.slice/crio-42673a007f6b7767af6cf535591df08791a372ccade91c1801465ff41a6d826b WatchSource:0}: Error finding container 42673a007f6b7767af6cf535591df08791a372ccade91c1801465ff41a6d826b: Status 404 returned error can't find the container with id 42673a007f6b7767af6cf535591df08791a372ccade91c1801465ff41a6d826b Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.245803 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ee8959-7d50-4e2c-93d8-70de85191fc3-serving-cert\") pod \"43ee8959-7d50-4e2c-93d8-70de85191fc3\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.245856 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-client-ca\") pod \"43ee8959-7d50-4e2c-93d8-70de85191fc3\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.245929 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-config\") pod \"43ee8959-7d50-4e2c-93d8-70de85191fc3\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.245973 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-proxy-ca-bundles\") pod \"43ee8959-7d50-4e2c-93d8-70de85191fc3\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.246007 4852 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-p7glg\" (UniqueName: \"kubernetes.io/projected/43ee8959-7d50-4e2c-93d8-70de85191fc3-kube-api-access-p7glg\") pod \"43ee8959-7d50-4e2c-93d8-70de85191fc3\" (UID: \"43ee8959-7d50-4e2c-93d8-70de85191fc3\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.246952 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-client-ca" (OuterVolumeSpecName: "client-ca") pod "43ee8959-7d50-4e2c-93d8-70de85191fc3" (UID: "43ee8959-7d50-4e2c-93d8-70de85191fc3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.247455 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "43ee8959-7d50-4e2c-93d8-70de85191fc3" (UID: "43ee8959-7d50-4e2c-93d8-70de85191fc3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.247643 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-config" (OuterVolumeSpecName: "config") pod "43ee8959-7d50-4e2c-93d8-70de85191fc3" (UID: "43ee8959-7d50-4e2c-93d8-70de85191fc3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.253515 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43ee8959-7d50-4e2c-93d8-70de85191fc3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "43ee8959-7d50-4e2c-93d8-70de85191fc3" (UID: "43ee8959-7d50-4e2c-93d8-70de85191fc3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.253561 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43ee8959-7d50-4e2c-93d8-70de85191fc3-kube-api-access-p7glg" (OuterVolumeSpecName: "kube-api-access-p7glg") pod "43ee8959-7d50-4e2c-93d8-70de85191fc3" (UID: "43ee8959-7d50-4e2c-93d8-70de85191fc3"). InnerVolumeSpecName "kube-api-access-p7glg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.347743 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-client-ca\") pod \"a5ff2962-1c70-420c-9003-9339c14eca14\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.347852 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4tqc\" (UniqueName: \"kubernetes.io/projected/a5ff2962-1c70-420c-9003-9339c14eca14-kube-api-access-x4tqc\") pod \"a5ff2962-1c70-420c-9003-9339c14eca14\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.347896 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ff2962-1c70-420c-9003-9339c14eca14-serving-cert\") pod \"a5ff2962-1c70-420c-9003-9339c14eca14\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.347948 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-config\") pod \"a5ff2962-1c70-420c-9003-9339c14eca14\" (UID: \"a5ff2962-1c70-420c-9003-9339c14eca14\") " Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.348998 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-client-ca" (OuterVolumeSpecName: "client-ca") pod "a5ff2962-1c70-420c-9003-9339c14eca14" (UID: "a5ff2962-1c70-420c-9003-9339c14eca14"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.349117 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.349138 4852 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.349152 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7glg\" (UniqueName: \"kubernetes.io/projected/43ee8959-7d50-4e2c-93d8-70de85191fc3-kube-api-access-p7glg\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.349163 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43ee8959-7d50-4e2c-93d8-70de85191fc3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.349173 4852 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/43ee8959-7d50-4e2c-93d8-70de85191fc3-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.349744 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-config" (OuterVolumeSpecName: "config") pod "a5ff2962-1c70-420c-9003-9339c14eca14" (UID: "a5ff2962-1c70-420c-9003-9339c14eca14"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.351818 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5ff2962-1c70-420c-9003-9339c14eca14-kube-api-access-x4tqc" (OuterVolumeSpecName: "kube-api-access-x4tqc") pod "a5ff2962-1c70-420c-9003-9339c14eca14" (UID: "a5ff2962-1c70-420c-9003-9339c14eca14"). InnerVolumeSpecName "kube-api-access-x4tqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.353072 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5ff2962-1c70-420c-9003-9339c14eca14-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a5ff2962-1c70-420c-9003-9339c14eca14" (UID: "a5ff2962-1c70-420c-9003-9339c14eca14"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.450216 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4tqc\" (UniqueName: \"kubernetes.io/projected/a5ff2962-1c70-420c-9003-9339c14eca14-kube-api-access-x4tqc\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.450264 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ff2962-1c70-420c-9003-9339c14eca14-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.450276 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.450284 4852 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5ff2962-1c70-420c-9003-9339c14eca14-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.475031 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h7cn5"] Jan 29 10:47:02 crc kubenswrapper[4852]: I0129 10:47:02.478709 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h7cn5"] Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.146135 4852 generic.go:334] "Generic (PLEG): container finished" podID="d4141fa7-4395-49ab-a156-538f6aaa9093" containerID="982c2cbc30c933f1ad22a5fb2de8d18ad58afa1c2e5f58786602870bae9fa820" exitCode=0 Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.146184 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tn8zl" event={"ID":"d4141fa7-4395-49ab-a156-538f6aaa9093","Type":"ContainerDied","Data":"982c2cbc30c933f1ad22a5fb2de8d18ad58afa1c2e5f58786602870bae9fa820"} Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.146490 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tn8zl" event={"ID":"d4141fa7-4395-49ab-a156-538f6aaa9093","Type":"ContainerStarted","Data":"42673a007f6b7767af6cf535591df08791a372ccade91c1801465ff41a6d826b"} Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.149526 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.149535 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz" event={"ID":"a5ff2962-1c70-420c-9003-9339c14eca14","Type":"ContainerDied","Data":"c6c3682ad4c6ccead9c46b4767ee676725254ff66f21743d0663b3e92b8feab2"} Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.149625 4852 scope.go:117] "RemoveContainer" containerID="82370cd0d3b2bc0cb12918cf0430e7c537f1834667d01367b707b0d4da8d2f98" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.176752 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz"] Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.179559 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-k9fzz"] Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.430802 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt"] Jan 29 10:47:03 crc kubenswrapper[4852]: E0129 10:47:03.431074 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ff2962-1c70-420c-9003-9339c14eca14" containerName="route-controller-manager" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.431089 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ff2962-1c70-420c-9003-9339c14eca14" containerName="route-controller-manager" Jan 29 10:47:03 crc kubenswrapper[4852]: E0129 10:47:03.431108 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43ee8959-7d50-4e2c-93d8-70de85191fc3" containerName="controller-manager" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.431116 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="43ee8959-7d50-4e2c-93d8-70de85191fc3" containerName="controller-manager" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.431231 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="43ee8959-7d50-4e2c-93d8-70de85191fc3" containerName="controller-manager" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.431244 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5ff2962-1c70-420c-9003-9339c14eca14" containerName="route-controller-manager" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.431718 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.433659 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.434218 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-fd87549dd-qfc47"] Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.434813 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.435010 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.435108 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.435489 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.437113 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.437177 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.437522 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.437696 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.437864 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.438136 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.438700 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.439393 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.449951 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.452894 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fd87549dd-qfc47"] Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.457632 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt"] Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.481038 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43ee8959-7d50-4e2c-93d8-70de85191fc3" path="/var/lib/kubelet/pods/43ee8959-7d50-4e2c-93d8-70de85191fc3/volumes" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.481540 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5ff2962-1c70-420c-9003-9339c14eca14" path="/var/lib/kubelet/pods/a5ff2962-1c70-420c-9003-9339c14eca14/volumes" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576077 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-proxy-ca-bundles\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576171 
4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d244e9e-516a-4c54-8482-31473600703f-config\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576203 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d244e9e-516a-4c54-8482-31473600703f-serving-cert\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576227 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7d244e9e-516a-4c54-8482-31473600703f-client-ca\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576254 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/399eaed7-c945-4929-a130-8c11e0298fbb-serving-cert\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576390 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lffpp\" (UniqueName: \"kubernetes.io/projected/399eaed7-c945-4929-a130-8c11e0298fbb-kube-api-access-lffpp\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576430 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4w2p\" (UniqueName: \"kubernetes.io/projected/7d244e9e-516a-4c54-8482-31473600703f-kube-api-access-m4w2p\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576452 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-client-ca\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.576536 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-config\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677126 
4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d244e9e-516a-4c54-8482-31473600703f-config\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677169 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d244e9e-516a-4c54-8482-31473600703f-serving-cert\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677191 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7d244e9e-516a-4c54-8482-31473600703f-client-ca\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677210 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/399eaed7-c945-4929-a130-8c11e0298fbb-serving-cert\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677228 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lffpp\" (UniqueName: \"kubernetes.io/projected/399eaed7-c945-4929-a130-8c11e0298fbb-kube-api-access-lffpp\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677249 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4w2p\" (UniqueName: \"kubernetes.io/projected/7d244e9e-516a-4c54-8482-31473600703f-kube-api-access-m4w2p\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.677274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-client-ca\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.678052 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-config\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.678096 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-proxy-ca-bundles\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.678322 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7d244e9e-516a-4c54-8482-31473600703f-client-ca\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.678350 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-client-ca\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.678464 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d244e9e-516a-4c54-8482-31473600703f-config\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.679177 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-config\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.681021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-proxy-ca-bundles\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.681142 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d244e9e-516a-4c54-8482-31473600703f-serving-cert\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.681276 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/399eaed7-c945-4929-a130-8c11e0298fbb-serving-cert\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.698444 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lffpp\" (UniqueName: \"kubernetes.io/projected/399eaed7-c945-4929-a130-8c11e0298fbb-kube-api-access-lffpp\") pod \"controller-manager-fd87549dd-qfc47\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" 
Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.700701 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4w2p\" (UniqueName: \"kubernetes.io/projected/7d244e9e-516a-4c54-8482-31473600703f-kube-api-access-m4w2p\") pod \"route-controller-manager-64ff8666d7-znnvt\" (UID: \"7d244e9e-516a-4c54-8482-31473600703f\") " pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.744914 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:03 crc kubenswrapper[4852]: I0129 10:47:03.754268 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:04 crc kubenswrapper[4852]: I0129 10:47:04.044919 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fd87549dd-qfc47"] Jan 29 10:47:04 crc kubenswrapper[4852]: I0129 10:47:04.057116 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt"] Jan 29 10:47:04 crc kubenswrapper[4852]: W0129 10:47:04.064614 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d244e9e_516a_4c54_8482_31473600703f.slice/crio-c3a6aecc300539bc7a0a1c683cbc88b1d824e9289adb475a545aa5de246bde0a WatchSource:0}: Error finding container c3a6aecc300539bc7a0a1c683cbc88b1d824e9289adb475a545aa5de246bde0a: Status 404 returned error can't find the container with id c3a6aecc300539bc7a0a1c683cbc88b1d824e9289adb475a545aa5de246bde0a Jan 29 10:47:04 crc kubenswrapper[4852]: I0129 10:47:04.162706 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tn8zl" event={"ID":"d4141fa7-4395-49ab-a156-538f6aaa9093","Type":"ContainerStarted","Data":"be29a68c53df725efc3909c7d618b732bb85f0c14071439c30f125dce0f73958"} Jan 29 10:47:04 crc kubenswrapper[4852]: I0129 10:47:04.165810 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" event={"ID":"7d244e9e-516a-4c54-8482-31473600703f","Type":"ContainerStarted","Data":"c3a6aecc300539bc7a0a1c683cbc88b1d824e9289adb475a545aa5de246bde0a"} Jan 29 10:47:04 crc kubenswrapper[4852]: I0129 10:47:04.168415 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" event={"ID":"399eaed7-c945-4929-a130-8c11e0298fbb","Type":"ContainerStarted","Data":"79e222cb13b52bbdbdb58bda4479841d0c0fcc810f45d4f51bc4dd8b72344e23"} Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.176022 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" event={"ID":"399eaed7-c945-4929-a130-8c11e0298fbb","Type":"ContainerStarted","Data":"fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07"} Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.176317 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.178338 4852 generic.go:334] "Generic (PLEG): container finished" podID="d4141fa7-4395-49ab-a156-538f6aaa9093" 
containerID="be29a68c53df725efc3909c7d618b732bb85f0c14071439c30f125dce0f73958" exitCode=0 Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.178378 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tn8zl" event={"ID":"d4141fa7-4395-49ab-a156-538f6aaa9093","Type":"ContainerDied","Data":"be29a68c53df725efc3909c7d618b732bb85f0c14071439c30f125dce0f73958"} Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.180234 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" event={"ID":"7d244e9e-516a-4c54-8482-31473600703f","Type":"ContainerStarted","Data":"8d09c0ae2dd250f10d154c8d1a9dddc1258b812912113a1d163abf5e1142bddc"} Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.180790 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.181802 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.187259 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.199298 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" podStartSLOduration=4.199280039 podStartE2EDuration="4.199280039s" podCreationTimestamp="2026-01-29 10:47:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:47:05.197433584 +0000 UTC m=+322.414764718" watchObservedRunningTime="2026-01-29 10:47:05.199280039 +0000 UTC m=+322.416611173" Jan 29 10:47:05 crc kubenswrapper[4852]: I0129 10:47:05.257898 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-64ff8666d7-znnvt" podStartSLOduration=4.257877758 podStartE2EDuration="4.257877758s" podCreationTimestamp="2026-01-29 10:47:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:47:05.231519841 +0000 UTC m=+322.448850995" watchObservedRunningTime="2026-01-29 10:47:05.257877758 +0000 UTC m=+322.475208902" Jan 29 10:47:06 crc kubenswrapper[4852]: I0129 10:47:06.188625 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tn8zl" event={"ID":"d4141fa7-4395-49ab-a156-538f6aaa9093","Type":"ContainerStarted","Data":"4d0048f8c7e123d2c5296708a7cc8273589ad6537c2e7a48c6013dea97a47e02"} Jan 29 10:47:06 crc kubenswrapper[4852]: I0129 10:47:06.211458 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tn8zl" podStartSLOduration=2.740048506 podStartE2EDuration="5.211432717s" podCreationTimestamp="2026-01-29 10:47:01 +0000 UTC" firstStartedPulling="2026-01-29 10:47:03.148184436 +0000 UTC m=+320.365515570" lastFinishedPulling="2026-01-29 10:47:05.619568647 +0000 UTC m=+322.836899781" observedRunningTime="2026-01-29 10:47:06.208728881 +0000 UTC m=+323.426060025" watchObservedRunningTime="2026-01-29 10:47:06.211432717 +0000 UTC 
m=+323.428763861" Jan 29 10:47:07 crc kubenswrapper[4852]: I0129 10:47:07.602216 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:47:07 crc kubenswrapper[4852]: I0129 10:47:07.603498 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:47:07 crc kubenswrapper[4852]: I0129 10:47:07.647308 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:47:08 crc kubenswrapper[4852]: I0129 10:47:08.239210 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h2lzr" Jan 29 10:47:11 crc kubenswrapper[4852]: I0129 10:47:11.903454 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:11 crc kubenswrapper[4852]: I0129 10:47:11.903751 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:11 crc kubenswrapper[4852]: I0129 10:47:11.940724 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:12 crc kubenswrapper[4852]: I0129 10:47:12.247300 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tn8zl" Jan 29 10:47:19 crc kubenswrapper[4852]: I0129 10:47:19.978379 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-df8ql"] Jan 29 10:47:19 crc kubenswrapper[4852]: I0129 10:47:19.979961 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:19 crc kubenswrapper[4852]: I0129 10:47:19.985604 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-df8ql"] Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.089069 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7x4s\" (UniqueName: \"kubernetes.io/projected/349eef38-2a3f-44a3-b5f1-50582e6c34ca-kube-api-access-v7x4s\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.089124 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349eef38-2a3f-44a3-b5f1-50582e6c34ca-catalog-content\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.089156 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349eef38-2a3f-44a3-b5f1-50582e6c34ca-utilities\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.191318 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7x4s\" (UniqueName: \"kubernetes.io/projected/349eef38-2a3f-44a3-b5f1-50582e6c34ca-kube-api-access-v7x4s\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.191383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349eef38-2a3f-44a3-b5f1-50582e6c34ca-catalog-content\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.191428 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349eef38-2a3f-44a3-b5f1-50582e6c34ca-utilities\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.191956 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349eef38-2a3f-44a3-b5f1-50582e6c34ca-utilities\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.192151 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349eef38-2a3f-44a3-b5f1-50582e6c34ca-catalog-content\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.215500 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v7x4s\" (UniqueName: \"kubernetes.io/projected/349eef38-2a3f-44a3-b5f1-50582e6c34ca-kube-api-access-v7x4s\") pod \"community-operators-df8ql\" (UID: \"349eef38-2a3f-44a3-b5f1-50582e6c34ca\") " pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.295513 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:20 crc kubenswrapper[4852]: I0129 10:47:20.700541 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-df8ql"] Jan 29 10:47:20 crc kubenswrapper[4852]: W0129 10:47:20.710799 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod349eef38_2a3f_44a3_b5f1_50582e6c34ca.slice/crio-2df8ce18515c19055fc640f146d3806cc16dfe9d7989f732ca31225fcea33e26 WatchSource:0}: Error finding container 2df8ce18515c19055fc640f146d3806cc16dfe9d7989f732ca31225fcea33e26: Status 404 returned error can't find the container with id 2df8ce18515c19055fc640f146d3806cc16dfe9d7989f732ca31225fcea33e26 Jan 29 10:47:21 crc kubenswrapper[4852]: I0129 10:47:21.269831 4852 generic.go:334] "Generic (PLEG): container finished" podID="349eef38-2a3f-44a3-b5f1-50582e6c34ca" containerID="5d919fcb4e0ba60dffdb7cba152616073b0807a6cf18bc67ed4e017fdf6d1619" exitCode=0 Jan 29 10:47:21 crc kubenswrapper[4852]: I0129 10:47:21.269921 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-df8ql" event={"ID":"349eef38-2a3f-44a3-b5f1-50582e6c34ca","Type":"ContainerDied","Data":"5d919fcb4e0ba60dffdb7cba152616073b0807a6cf18bc67ed4e017fdf6d1619"} Jan 29 10:47:21 crc kubenswrapper[4852]: I0129 10:47:21.270233 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-df8ql" event={"ID":"349eef38-2a3f-44a3-b5f1-50582e6c34ca","Type":"ContainerStarted","Data":"2df8ce18515c19055fc640f146d3806cc16dfe9d7989f732ca31225fcea33e26"} Jan 29 10:47:23 crc kubenswrapper[4852]: I0129 10:47:23.281306 4852 generic.go:334] "Generic (PLEG): container finished" podID="349eef38-2a3f-44a3-b5f1-50582e6c34ca" containerID="0f2006baad932c294aee101bd1ee589787958e8c95a4d11830b2ed33a9b6db16" exitCode=0 Jan 29 10:47:23 crc kubenswrapper[4852]: I0129 10:47:23.281348 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-df8ql" event={"ID":"349eef38-2a3f-44a3-b5f1-50582e6c34ca","Type":"ContainerDied","Data":"0f2006baad932c294aee101bd1ee589787958e8c95a4d11830b2ed33a9b6db16"} Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.104310 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-t42hr"] Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.104982 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.119550 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-t42hr"] Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.252872 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr68k\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-kube-api-access-qr68k\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.252928 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.252987 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-ca-trust-extracted\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.253024 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-registry-certificates\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.253068 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-bound-sa-token\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.253298 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-registry-tls\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.253342 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-trusted-ca\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.253370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-installation-pull-secrets\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.276652 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.307210 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-df8ql" event={"ID":"349eef38-2a3f-44a3-b5f1-50582e6c34ca","Type":"ContainerStarted","Data":"cba78860b41cc868a63b025513b9606597b06230d769be49b4111958cd50f385"} Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.330391 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-df8ql" podStartSLOduration=3.448183393 podStartE2EDuration="6.330370782s" podCreationTimestamp="2026-01-29 10:47:19 +0000 UTC" firstStartedPulling="2026-01-29 10:47:21.271773185 +0000 UTC m=+338.489104309" lastFinishedPulling="2026-01-29 10:47:24.153960544 +0000 UTC m=+341.371291698" observedRunningTime="2026-01-29 10:47:25.326423553 +0000 UTC m=+342.543754687" watchObservedRunningTime="2026-01-29 10:47:25.330370782 +0000 UTC m=+342.547701916" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354353 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr68k\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-kube-api-access-qr68k\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354404 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-ca-trust-extracted\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354426 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-registry-certificates\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354461 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-bound-sa-token\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354494 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-registry-tls\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354520 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-trusted-ca\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354540 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-installation-pull-secrets\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.354966 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-ca-trust-extracted\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.355871 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-trusted-ca\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.355928 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-registry-certificates\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.361142 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-registry-tls\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.361655 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-installation-pull-secrets\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.372674 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qr68k\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-kube-api-access-qr68k\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc 
kubenswrapper[4852]: I0129 10:47:25.372964 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4ab6d536-8511-49fb-8442-f4cd3d8d9a54-bound-sa-token\") pod \"image-registry-66df7c8f76-t42hr\" (UID: \"4ab6d536-8511-49fb-8442-f4cd3d8d9a54\") " pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.419180 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:25 crc kubenswrapper[4852]: I0129 10:47:25.878554 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-t42hr"] Jan 29 10:47:26 crc kubenswrapper[4852]: I0129 10:47:26.314174 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" event={"ID":"4ab6d536-8511-49fb-8442-f4cd3d8d9a54","Type":"ContainerStarted","Data":"18411d1c1104f3437e457672d78e5c8b4e57320bd9291775fd5b5670a46492ab"} Jan 29 10:47:26 crc kubenswrapper[4852]: I0129 10:47:26.314210 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" event={"ID":"4ab6d536-8511-49fb-8442-f4cd3d8d9a54","Type":"ContainerStarted","Data":"f2f421e3ee2c9b11a54edea37c9e94563fac789c8825ec91707e76cab2db3ea8"} Jan 29 10:47:26 crc kubenswrapper[4852]: I0129 10:47:26.314247 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:26 crc kubenswrapper[4852]: I0129 10:47:26.330499 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" podStartSLOduration=1.330479478 podStartE2EDuration="1.330479478s" podCreationTimestamp="2026-01-29 10:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:47:26.327919304 +0000 UTC m=+343.545250438" watchObservedRunningTime="2026-01-29 10:47:26.330479478 +0000 UTC m=+343.547810632" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.013538 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qt4xt"] Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.015565 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.024140 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qt4xt"] Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.179242 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-catalog-content\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.179467 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlxzb\" (UniqueName: \"kubernetes.io/projected/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-kube-api-access-jlxzb\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.179624 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-utilities\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.280723 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-catalog-content\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.280840 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlxzb\" (UniqueName: \"kubernetes.io/projected/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-kube-api-access-jlxzb\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.280874 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-utilities\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.281332 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-catalog-content\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.282018 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-utilities\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.307740 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jlxzb\" (UniqueName: \"kubernetes.io/projected/45f6e7f7-6b36-4deb-969d-fbc2bdeee284-kube-api-access-jlxzb\") pod \"community-operators-qt4xt\" (UID: \"45f6e7f7-6b36-4deb-969d-fbc2bdeee284\") " pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.334319 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:27 crc kubenswrapper[4852]: I0129 10:47:27.746420 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qt4xt"] Jan 29 10:47:27 crc kubenswrapper[4852]: W0129 10:47:27.751340 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45f6e7f7_6b36_4deb_969d_fbc2bdeee284.slice/crio-6af4fef5683e97adc1fb178eb1ff323ae764609c74b369f3eb714192a15eb593 WatchSource:0}: Error finding container 6af4fef5683e97adc1fb178eb1ff323ae764609c74b369f3eb714192a15eb593: Status 404 returned error can't find the container with id 6af4fef5683e97adc1fb178eb1ff323ae764609c74b369f3eb714192a15eb593 Jan 29 10:47:28 crc kubenswrapper[4852]: I0129 10:47:28.329886 4852 generic.go:334] "Generic (PLEG): container finished" podID="45f6e7f7-6b36-4deb-969d-fbc2bdeee284" containerID="7e2947b47dffb68748d6614d128b663ae656c51d40183cba907d2045e6daea58" exitCode=0 Jan 29 10:47:28 crc kubenswrapper[4852]: I0129 10:47:28.330027 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt4xt" event={"ID":"45f6e7f7-6b36-4deb-969d-fbc2bdeee284","Type":"ContainerDied","Data":"7e2947b47dffb68748d6614d128b663ae656c51d40183cba907d2045e6daea58"} Jan 29 10:47:28 crc kubenswrapper[4852]: I0129 10:47:28.330077 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt4xt" event={"ID":"45f6e7f7-6b36-4deb-969d-fbc2bdeee284","Type":"ContainerStarted","Data":"6af4fef5683e97adc1fb178eb1ff323ae764609c74b369f3eb714192a15eb593"} Jan 29 10:47:29 crc kubenswrapper[4852]: I0129 10:47:29.337572 4852 generic.go:334] "Generic (PLEG): container finished" podID="45f6e7f7-6b36-4deb-969d-fbc2bdeee284" containerID="a18763dc8881c6dd7bb452dacc10971d2aac7bc13ef332e878ebb5b13d176659" exitCode=0 Jan 29 10:47:29 crc kubenswrapper[4852]: I0129 10:47:29.337619 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt4xt" event={"ID":"45f6e7f7-6b36-4deb-969d-fbc2bdeee284","Type":"ContainerDied","Data":"a18763dc8881c6dd7bb452dacc10971d2aac7bc13ef332e878ebb5b13d176659"} Jan 29 10:47:30 crc kubenswrapper[4852]: I0129 10:47:30.295758 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:30 crc kubenswrapper[4852]: I0129 10:47:30.296055 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:30 crc kubenswrapper[4852]: I0129 10:47:30.344230 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:30 crc kubenswrapper[4852]: I0129 10:47:30.344726 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt4xt" 
event={"ID":"45f6e7f7-6b36-4deb-969d-fbc2bdeee284","Type":"ContainerStarted","Data":"c1762ccdddac41dff1b83ad0740d28874e759de0ba940b5290f749ccecd2ccf3"} Jan 29 10:47:30 crc kubenswrapper[4852]: I0129 10:47:30.379360 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qt4xt" podStartSLOduration=2.968701242 podStartE2EDuration="4.379341151s" podCreationTimestamp="2026-01-29 10:47:26 +0000 UTC" firstStartedPulling="2026-01-29 10:47:28.333021867 +0000 UTC m=+345.550353001" lastFinishedPulling="2026-01-29 10:47:29.743661776 +0000 UTC m=+346.960992910" observedRunningTime="2026-01-29 10:47:30.377981297 +0000 UTC m=+347.595312431" watchObservedRunningTime="2026-01-29 10:47:30.379341151 +0000 UTC m=+347.596672285" Jan 29 10:47:30 crc kubenswrapper[4852]: I0129 10:47:30.386944 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-df8ql" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.543490 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dvfth"] Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.544901 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.554103 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dvfth"] Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.697740 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kd5tg\" (UniqueName: \"kubernetes.io/projected/b6ada955-7839-4453-b285-aa8c3f02ef76-kube-api-access-kd5tg\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.697836 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ada955-7839-4453-b285-aa8c3f02ef76-utilities\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.697862 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ada955-7839-4453-b285-aa8c3f02ef76-catalog-content\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.798860 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ada955-7839-4453-b285-aa8c3f02ef76-utilities\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.798922 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ada955-7839-4453-b285-aa8c3f02ef76-catalog-content\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 
10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.798978 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kd5tg\" (UniqueName: \"kubernetes.io/projected/b6ada955-7839-4453-b285-aa8c3f02ef76-kube-api-access-kd5tg\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.799472 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6ada955-7839-4453-b285-aa8c3f02ef76-utilities\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.799709 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6ada955-7839-4453-b285-aa8c3f02ef76-catalog-content\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.827952 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kd5tg\" (UniqueName: \"kubernetes.io/projected/b6ada955-7839-4453-b285-aa8c3f02ef76-kube-api-access-kd5tg\") pod \"community-operators-dvfth\" (UID: \"b6ada955-7839-4453-b285-aa8c3f02ef76\") " pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:35 crc kubenswrapper[4852]: I0129 10:47:35.865281 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:36 crc kubenswrapper[4852]: I0129 10:47:36.317819 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dvfth"] Jan 29 10:47:36 crc kubenswrapper[4852]: I0129 10:47:36.377625 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dvfth" event={"ID":"b6ada955-7839-4453-b285-aa8c3f02ef76","Type":"ContainerStarted","Data":"30b9272e7b922548f9a119add9c0cf50d724ee1d3e56bddba85d86ad7497bc2e"} Jan 29 10:47:37 crc kubenswrapper[4852]: I0129 10:47:37.335635 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:37 crc kubenswrapper[4852]: I0129 10:47:37.335945 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:37 crc kubenswrapper[4852]: I0129 10:47:37.379811 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:37 crc kubenswrapper[4852]: I0129 10:47:37.384757 4852 generic.go:334] "Generic (PLEG): container finished" podID="b6ada955-7839-4453-b285-aa8c3f02ef76" containerID="923207fe717b6f18e33d89f5079887bee575bbb9ba9e4bc03a22dd6f0ae22794" exitCode=0 Jan 29 10:47:37 crc kubenswrapper[4852]: I0129 10:47:37.385709 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dvfth" event={"ID":"b6ada955-7839-4453-b285-aa8c3f02ef76","Type":"ContainerDied","Data":"923207fe717b6f18e33d89f5079887bee575bbb9ba9e4bc03a22dd6f0ae22794"} Jan 29 10:47:37 crc kubenswrapper[4852]: I0129 10:47:37.442757 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-qt4xt" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.145794 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j5pj7"] Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.147404 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.149154 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.157245 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5pj7"] Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.331631 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d976b-738e-4252-8dfb-30e9b0a5fdbf-utilities\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.331700 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnll5\" (UniqueName: \"kubernetes.io/projected/470d976b-738e-4252-8dfb-30e9b0a5fdbf-kube-api-access-fnll5\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.331913 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d976b-738e-4252-8dfb-30e9b0a5fdbf-catalog-content\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.347526 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-67jbx"] Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.348465 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.350714 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.360276 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-67jbx"] Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.391496 4852 generic.go:334] "Generic (PLEG): container finished" podID="b6ada955-7839-4453-b285-aa8c3f02ef76" containerID="eb322087f5d79a0f44361b483c239bbb6e2a4ff46a46bc206657dde36911821d" exitCode=0 Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.391724 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dvfth" event={"ID":"b6ada955-7839-4453-b285-aa8c3f02ef76","Type":"ContainerDied","Data":"eb322087f5d79a0f44361b483c239bbb6e2a4ff46a46bc206657dde36911821d"} Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.433383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnll5\" (UniqueName: \"kubernetes.io/projected/470d976b-738e-4252-8dfb-30e9b0a5fdbf-kube-api-access-fnll5\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.433486 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d976b-738e-4252-8dfb-30e9b0a5fdbf-catalog-content\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.433511 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d976b-738e-4252-8dfb-30e9b0a5fdbf-utilities\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.434012 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/470d976b-738e-4252-8dfb-30e9b0a5fdbf-utilities\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.434104 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/470d976b-738e-4252-8dfb-30e9b0a5fdbf-catalog-content\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.450957 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnll5\" (UniqueName: \"kubernetes.io/projected/470d976b-738e-4252-8dfb-30e9b0a5fdbf-kube-api-access-fnll5\") pod \"redhat-marketplace-j5pj7\" (UID: \"470d976b-738e-4252-8dfb-30e9b0a5fdbf\") " pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.463272 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.535287 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-catalog-content\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.535351 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-utilities\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.535381 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqcqz\" (UniqueName: \"kubernetes.io/projected/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-kube-api-access-sqcqz\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.637607 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-utilities\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.637907 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqcqz\" (UniqueName: \"kubernetes.io/projected/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-kube-api-access-sqcqz\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.638008 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-catalog-content\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.638344 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-utilities\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.639606 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-catalog-content\") pod \"redhat-operators-67jbx\" (UID: \"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.665857 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqcqz\" (UniqueName: \"kubernetes.io/projected/c3dfa5e5-86ea-4834-b97e-8d5831bd2f01-kube-api-access-sqcqz\") pod \"redhat-operators-67jbx\" (UID: 
\"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01\") " pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.903863 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5pj7"] Jan 29 10:47:38 crc kubenswrapper[4852]: W0129 10:47:38.909046 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod470d976b_738e_4252_8dfb_30e9b0a5fdbf.slice/crio-41ca99a0c0cbef44ce5e6230688911e0fb1e5d416eccf0496c5c832adce08400 WatchSource:0}: Error finding container 41ca99a0c0cbef44ce5e6230688911e0fb1e5d416eccf0496c5c832adce08400: Status 404 returned error can't find the container with id 41ca99a0c0cbef44ce5e6230688911e0fb1e5d416eccf0496c5c832adce08400 Jan 29 10:47:38 crc kubenswrapper[4852]: I0129 10:47:38.961330 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.351897 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-67jbx"] Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.396662 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67jbx" event={"ID":"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01","Type":"ContainerStarted","Data":"4177243faa42569f3fa43e283d921e871688597fedc3829da5b89409e3dc08b1"} Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.401196 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dvfth" event={"ID":"b6ada955-7839-4453-b285-aa8c3f02ef76","Type":"ContainerStarted","Data":"4dca820a13b9ae25b07be63eff4aae069f2d966a05cd68f8643f836d4b926cd8"} Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.403393 4852 generic.go:334] "Generic (PLEG): container finished" podID="470d976b-738e-4252-8dfb-30e9b0a5fdbf" containerID="3997d47af093bf4dabbea1ab51ff5b9ebe513bcefa623e438b81ea19fb20715f" exitCode=0 Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.403464 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5pj7" event={"ID":"470d976b-738e-4252-8dfb-30e9b0a5fdbf","Type":"ContainerDied","Data":"3997d47af093bf4dabbea1ab51ff5b9ebe513bcefa623e438b81ea19fb20715f"} Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.403491 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5pj7" event={"ID":"470d976b-738e-4252-8dfb-30e9b0a5fdbf","Type":"ContainerStarted","Data":"41ca99a0c0cbef44ce5e6230688911e0fb1e5d416eccf0496c5c832adce08400"} Jan 29 10:47:39 crc kubenswrapper[4852]: I0129 10:47:39.420100 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dvfth" podStartSLOduration=2.875846515 podStartE2EDuration="4.42008334s" podCreationTimestamp="2026-01-29 10:47:35 +0000 UTC" firstStartedPulling="2026-01-29 10:47:37.386928965 +0000 UTC m=+354.604260099" lastFinishedPulling="2026-01-29 10:47:38.93116579 +0000 UTC m=+356.148496924" observedRunningTime="2026-01-29 10:47:39.419381932 +0000 UTC m=+356.636713076" watchObservedRunningTime="2026-01-29 10:47:39.42008334 +0000 UTC m=+356.637414474" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.410266 4852 generic.go:334] "Generic (PLEG): container finished" podID="470d976b-738e-4252-8dfb-30e9b0a5fdbf" 
containerID="b3e96417d33c61efb692b16478bc9db9db24113573ccb4c3b75f4f7490323390" exitCode=0 Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.410356 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5pj7" event={"ID":"470d976b-738e-4252-8dfb-30e9b0a5fdbf","Type":"ContainerDied","Data":"b3e96417d33c61efb692b16478bc9db9db24113573ccb4c3b75f4f7490323390"} Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.412472 4852 generic.go:334] "Generic (PLEG): container finished" podID="c3dfa5e5-86ea-4834-b97e-8d5831bd2f01" containerID="f0c78ebfab119553f15623f5ed65e2bdf6038623ee65828c2f16a5c65692dda4" exitCode=0 Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.412530 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67jbx" event={"ID":"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01","Type":"ContainerDied","Data":"f0c78ebfab119553f15623f5ed65e2bdf6038623ee65828c2f16a5c65692dda4"} Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.544280 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ljj87"] Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.545430 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.547235 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.557452 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ljj87"] Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.668471 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d09a2d7-28da-404a-9a31-30364cf716a0-catalog-content\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.668871 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d09a2d7-28da-404a-9a31-30364cf716a0-utilities\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.669054 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l8wt\" (UniqueName: \"kubernetes.io/projected/5d09a2d7-28da-404a-9a31-30364cf716a0-kube-api-access-4l8wt\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.770262 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l8wt\" (UniqueName: \"kubernetes.io/projected/5d09a2d7-28da-404a-9a31-30364cf716a0-kube-api-access-4l8wt\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.770355 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/5d09a2d7-28da-404a-9a31-30364cf716a0-catalog-content\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.770377 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d09a2d7-28da-404a-9a31-30364cf716a0-utilities\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.770972 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d09a2d7-28da-404a-9a31-30364cf716a0-utilities\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.771211 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d09a2d7-28da-404a-9a31-30364cf716a0-catalog-content\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.792329 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l8wt\" (UniqueName: \"kubernetes.io/projected/5d09a2d7-28da-404a-9a31-30364cf716a0-kube-api-access-4l8wt\") pod \"certified-operators-ljj87\" (UID: \"5d09a2d7-28da-404a-9a31-30364cf716a0\") " pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:40 crc kubenswrapper[4852]: I0129 10:47:40.860151 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:41 crc kubenswrapper[4852]: I0129 10:47:41.252469 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ljj87"] Jan 29 10:47:41 crc kubenswrapper[4852]: W0129 10:47:41.260574 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d09a2d7_28da_404a_9a31_30364cf716a0.slice/crio-51d7f4fe335b4edb98e3e605c5a7b0104a73e327f39e66ff04ba9ae3ac82509e WatchSource:0}: Error finding container 51d7f4fe335b4edb98e3e605c5a7b0104a73e327f39e66ff04ba9ae3ac82509e: Status 404 returned error can't find the container with id 51d7f4fe335b4edb98e3e605c5a7b0104a73e327f39e66ff04ba9ae3ac82509e Jan 29 10:47:41 crc kubenswrapper[4852]: I0129 10:47:41.419325 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljj87" event={"ID":"5d09a2d7-28da-404a-9a31-30364cf716a0","Type":"ContainerStarted","Data":"51d7f4fe335b4edb98e3e605c5a7b0104a73e327f39e66ff04ba9ae3ac82509e"} Jan 29 10:47:42 crc kubenswrapper[4852]: I0129 10:47:42.425767 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5pj7" event={"ID":"470d976b-738e-4252-8dfb-30e9b0a5fdbf","Type":"ContainerStarted","Data":"b3780ad9d41b429727b43fd6c52f30a67ae4eb462c186c6def468272c28b52ac"} Jan 29 10:47:42 crc kubenswrapper[4852]: I0129 10:47:42.428241 4852 generic.go:334] "Generic (PLEG): container finished" podID="5d09a2d7-28da-404a-9a31-30364cf716a0" containerID="7353de520d38590bceae4cdc48913b891cc2f803ea54eb7569c9e26de14115a1" exitCode=0 Jan 29 10:47:42 crc kubenswrapper[4852]: I0129 10:47:42.428289 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljj87" event={"ID":"5d09a2d7-28da-404a-9a31-30364cf716a0","Type":"ContainerDied","Data":"7353de520d38590bceae4cdc48913b891cc2f803ea54eb7569c9e26de14115a1"} Jan 29 10:47:42 crc kubenswrapper[4852]: I0129 10:47:42.430279 4852 generic.go:334] "Generic (PLEG): container finished" podID="c3dfa5e5-86ea-4834-b97e-8d5831bd2f01" containerID="e689f9c886ba9caafd254619508c590a3a476daa87a0f5ebf841330f94598311" exitCode=0 Jan 29 10:47:42 crc kubenswrapper[4852]: I0129 10:47:42.430463 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67jbx" event={"ID":"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01","Type":"ContainerDied","Data":"e689f9c886ba9caafd254619508c590a3a476daa87a0f5ebf841330f94598311"} Jan 29 10:47:42 crc kubenswrapper[4852]: I0129 10:47:42.444482 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j5pj7" podStartSLOduration=2.309634995 podStartE2EDuration="4.444465749s" podCreationTimestamp="2026-01-29 10:47:38 +0000 UTC" firstStartedPulling="2026-01-29 10:47:39.404850729 +0000 UTC m=+356.622181863" lastFinishedPulling="2026-01-29 10:47:41.539681483 +0000 UTC m=+358.757012617" observedRunningTime="2026-01-29 10:47:42.442860529 +0000 UTC m=+359.660191663" watchObservedRunningTime="2026-01-29 10:47:42.444465749 +0000 UTC m=+359.661796883" Jan 29 10:47:43 crc kubenswrapper[4852]: I0129 10:47:43.439713 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljj87" event={"ID":"5d09a2d7-28da-404a-9a31-30364cf716a0","Type":"ContainerStarted","Data":"0dc5b25b44981e90c583c07052387ac113aea772cfa18be33612ea5dbf657bec"} Jan 29 
10:47:43 crc kubenswrapper[4852]: I0129 10:47:43.442505 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67jbx" event={"ID":"c3dfa5e5-86ea-4834-b97e-8d5831bd2f01","Type":"ContainerStarted","Data":"878bf623dc524f34d948ebca969733f12c210f2430941f085235099593460829"} Jan 29 10:47:43 crc kubenswrapper[4852]: I0129 10:47:43.486457 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-67jbx" podStartSLOduration=2.869120367 podStartE2EDuration="5.48643985s" podCreationTimestamp="2026-01-29 10:47:38 +0000 UTC" firstStartedPulling="2026-01-29 10:47:40.413861988 +0000 UTC m=+357.631193132" lastFinishedPulling="2026-01-29 10:47:43.031181481 +0000 UTC m=+360.248512615" observedRunningTime="2026-01-29 10:47:43.482138293 +0000 UTC m=+360.699469427" watchObservedRunningTime="2026-01-29 10:47:43.48643985 +0000 UTC m=+360.703770974" Jan 29 10:47:44 crc kubenswrapper[4852]: I0129 10:47:44.449290 4852 generic.go:334] "Generic (PLEG): container finished" podID="5d09a2d7-28da-404a-9a31-30364cf716a0" containerID="0dc5b25b44981e90c583c07052387ac113aea772cfa18be33612ea5dbf657bec" exitCode=0 Jan 29 10:47:44 crc kubenswrapper[4852]: I0129 10:47:44.450648 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljj87" event={"ID":"5d09a2d7-28da-404a-9a31-30364cf716a0","Type":"ContainerDied","Data":"0dc5b25b44981e90c583c07052387ac113aea772cfa18be33612ea5dbf657bec"} Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.423337 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-t42hr" Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.457257 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljj87" event={"ID":"5d09a2d7-28da-404a-9a31-30364cf716a0","Type":"ContainerStarted","Data":"87d60e11a76c2da75289b0f25b9c9fe1872d40624ec67f1e14ec2d6c55830c96"} Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.499043 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tf7qq"] Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.503742 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ljj87" podStartSLOduration=2.907135833 podStartE2EDuration="5.503722729s" podCreationTimestamp="2026-01-29 10:47:40 +0000 UTC" firstStartedPulling="2026-01-29 10:47:42.429614277 +0000 UTC m=+359.646945411" lastFinishedPulling="2026-01-29 10:47:45.026201173 +0000 UTC m=+362.243532307" observedRunningTime="2026-01-29 10:47:45.493101793 +0000 UTC m=+362.710432947" watchObservedRunningTime="2026-01-29 10:47:45.503722729 +0000 UTC m=+362.721053863" Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.866055 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.866104 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:45 crc kubenswrapper[4852]: I0129 10:47:45.910156 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:46 crc kubenswrapper[4852]: I0129 10:47:46.511599 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/community-operators-dvfth" Jan 29 10:47:48 crc kubenswrapper[4852]: I0129 10:47:48.463452 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:48 crc kubenswrapper[4852]: I0129 10:47:48.463495 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:48 crc kubenswrapper[4852]: I0129 10:47:48.500660 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:48 crc kubenswrapper[4852]: I0129 10:47:48.537637 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j5pj7" Jan 29 10:47:48 crc kubenswrapper[4852]: I0129 10:47:48.962782 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:48 crc kubenswrapper[4852]: I0129 10:47:48.962852 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:49 crc kubenswrapper[4852]: I0129 10:47:49.016632 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:49 crc kubenswrapper[4852]: I0129 10:47:49.531246 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-67jbx" Jan 29 10:47:50 crc kubenswrapper[4852]: I0129 10:47:50.861196 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:50 crc kubenswrapper[4852]: I0129 10:47:50.862192 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:50 crc kubenswrapper[4852]: I0129 10:47:50.938913 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:47:51 crc kubenswrapper[4852]: I0129 10:47:51.560421 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ljj87" Jan 29 10:48:00 crc kubenswrapper[4852]: I0129 10:48:00.017530 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:48:00 crc kubenswrapper[4852]: I0129 10:48:00.018144 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:48:07 crc kubenswrapper[4852]: I0129 10:48:07.959669 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fd87549dd-qfc47"] Jan 29 10:48:07 crc kubenswrapper[4852]: I0129 10:48:07.960904 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" podUID="399eaed7-c945-4929-a130-8c11e0298fbb" 
containerName="controller-manager" containerID="cri-o://fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07" gracePeriod=30 Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.311263 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.492910 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-config\") pod \"399eaed7-c945-4929-a130-8c11e0298fbb\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.492976 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-proxy-ca-bundles\") pod \"399eaed7-c945-4929-a130-8c11e0298fbb\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.493012 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lffpp\" (UniqueName: \"kubernetes.io/projected/399eaed7-c945-4929-a130-8c11e0298fbb-kube-api-access-lffpp\") pod \"399eaed7-c945-4929-a130-8c11e0298fbb\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.493082 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/399eaed7-c945-4929-a130-8c11e0298fbb-serving-cert\") pod \"399eaed7-c945-4929-a130-8c11e0298fbb\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.493105 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-client-ca\") pod \"399eaed7-c945-4929-a130-8c11e0298fbb\" (UID: \"399eaed7-c945-4929-a130-8c11e0298fbb\") " Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.493898 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-client-ca" (OuterVolumeSpecName: "client-ca") pod "399eaed7-c945-4929-a130-8c11e0298fbb" (UID: "399eaed7-c945-4929-a130-8c11e0298fbb"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.493982 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-config" (OuterVolumeSpecName: "config") pod "399eaed7-c945-4929-a130-8c11e0298fbb" (UID: "399eaed7-c945-4929-a130-8c11e0298fbb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.494445 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "399eaed7-c945-4929-a130-8c11e0298fbb" (UID: "399eaed7-c945-4929-a130-8c11e0298fbb"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.499954 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/399eaed7-c945-4929-a130-8c11e0298fbb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "399eaed7-c945-4929-a130-8c11e0298fbb" (UID: "399eaed7-c945-4929-a130-8c11e0298fbb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.500129 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/399eaed7-c945-4929-a130-8c11e0298fbb-kube-api-access-lffpp" (OuterVolumeSpecName: "kube-api-access-lffpp") pod "399eaed7-c945-4929-a130-8c11e0298fbb" (UID: "399eaed7-c945-4929-a130-8c11e0298fbb"). InnerVolumeSpecName "kube-api-access-lffpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.594521 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lffpp\" (UniqueName: \"kubernetes.io/projected/399eaed7-c945-4929-a130-8c11e0298fbb-kube-api-access-lffpp\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.594609 4852 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/399eaed7-c945-4929-a130-8c11e0298fbb-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.594632 4852 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.594653 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.594676 4852 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/399eaed7-c945-4929-a130-8c11e0298fbb-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.682899 4852 generic.go:334] "Generic (PLEG): container finished" podID="399eaed7-c945-4929-a130-8c11e0298fbb" containerID="fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07" exitCode=0 Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.682943 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" event={"ID":"399eaed7-c945-4929-a130-8c11e0298fbb","Type":"ContainerDied","Data":"fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07"} Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.682981 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" event={"ID":"399eaed7-c945-4929-a130-8c11e0298fbb","Type":"ContainerDied","Data":"79e222cb13b52bbdbdb58bda4479841d0c0fcc810f45d4f51bc4dd8b72344e23"} Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.683001 4852 scope.go:117] "RemoveContainer" containerID="fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.683003 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fd87549dd-qfc47" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.705271 4852 scope.go:117] "RemoveContainer" containerID="fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07" Jan 29 10:48:08 crc kubenswrapper[4852]: E0129 10:48:08.705797 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07\": container with ID starting with fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07 not found: ID does not exist" containerID="fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.705873 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07"} err="failed to get container status \"fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07\": rpc error: code = NotFound desc = could not find container \"fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07\": container with ID starting with fc24192b278542df3646dd7fe7372db0024ca8a75486a4d4961b4febf483ac07 not found: ID does not exist" Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.717906 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fd87549dd-qfc47"] Jan 29 10:48:08 crc kubenswrapper[4852]: I0129 10:48:08.722914 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-fd87549dd-qfc47"] Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.470359 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="399eaed7-c945-4929-a130-8c11e0298fbb" path="/var/lib/kubelet/pods/399eaed7-c945-4929-a130-8c11e0298fbb/volumes" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.479516 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-57b769b7bf-v6sxk"] Jan 29 10:48:09 crc kubenswrapper[4852]: E0129 10:48:09.479820 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="399eaed7-c945-4929-a130-8c11e0298fbb" containerName="controller-manager" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.479839 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="399eaed7-c945-4929-a130-8c11e0298fbb" containerName="controller-manager" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.479975 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="399eaed7-c945-4929-a130-8c11e0298fbb" containerName="controller-manager" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.480367 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.483189 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.483718 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.485156 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.485621 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.485663 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.485632 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.496245 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-57b769b7bf-v6sxk"] Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.500493 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.530629 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-client-ca\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.530842 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-config\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.531089 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-proxy-ca-bundles\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.531135 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-serving-cert\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.531196 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck5hz\" (UniqueName: 
\"kubernetes.io/projected/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-kube-api-access-ck5hz\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.632519 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-client-ca\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.632725 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-config\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.632915 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-proxy-ca-bundles\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.632957 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-serving-cert\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.633014 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck5hz\" (UniqueName: \"kubernetes.io/projected/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-kube-api-access-ck5hz\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.636156 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-client-ca\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.637029 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-config\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.639064 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-serving-cert\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " 
pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.639719 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-proxy-ca-bundles\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.664983 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck5hz\" (UniqueName: \"kubernetes.io/projected/cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4-kube-api-access-ck5hz\") pod \"controller-manager-57b769b7bf-v6sxk\" (UID: \"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4\") " pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:09 crc kubenswrapper[4852]: I0129 10:48:09.849925 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.013033 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-57b769b7bf-v6sxk"] Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.533517 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" podUID="ee27437f-db20-4337-813d-aaa57c3a95d5" containerName="registry" containerID="cri-o://f34307266f43e050fadb9eadd23785daf8113f78d6ae7c54ee9b4e748e8f3ce4" gracePeriod=30 Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.698280 4852 generic.go:334] "Generic (PLEG): container finished" podID="ee27437f-db20-4337-813d-aaa57c3a95d5" containerID="f34307266f43e050fadb9eadd23785daf8113f78d6ae7c54ee9b4e748e8f3ce4" exitCode=0 Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.698427 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" event={"ID":"ee27437f-db20-4337-813d-aaa57c3a95d5","Type":"ContainerDied","Data":"f34307266f43e050fadb9eadd23785daf8113f78d6ae7c54ee9b4e748e8f3ce4"} Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.700569 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" event={"ID":"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4","Type":"ContainerStarted","Data":"11c739d9d93bc2bf3deb3da2b2e48968b8d8e900c1afbf45cafe730e782d16e0"} Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.700627 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" event={"ID":"cdcc600a-d8e7-40c2-bdc5-d89ab1dceeb4","Type":"ContainerStarted","Data":"3b0676961244bcb68b7bbfe0b5b707f8207cf46d39b0f3f58175376ec5b759c0"} Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.701024 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.705335 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.723718 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-controller-manager/controller-manager-57b769b7bf-v6sxk" podStartSLOduration=3.723692464 podStartE2EDuration="3.723692464s" podCreationTimestamp="2026-01-29 10:48:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:48:10.717389467 +0000 UTC m=+387.934720601" watchObservedRunningTime="2026-01-29 10:48:10.723692464 +0000 UTC m=+387.941023608" Jan 29 10:48:10 crc kubenswrapper[4852]: I0129 10:48:10.949626 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.148731 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-bound-sa-token\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.148785 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7tvv\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-kube-api-access-x7tvv\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.148848 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-trusted-ca\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.148864 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-tls\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.148899 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ee27437f-db20-4337-813d-aaa57c3a95d5-ca-trust-extracted\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.149026 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.149043 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-certificates\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: \"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.149103 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ee27437f-db20-4337-813d-aaa57c3a95d5-installation-pull-secrets\") pod \"ee27437f-db20-4337-813d-aaa57c3a95d5\" (UID: 
\"ee27437f-db20-4337-813d-aaa57c3a95d5\") " Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.150072 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.150111 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.155125 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.155504 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee27437f-db20-4337-813d-aaa57c3a95d5-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.156007 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-kube-api-access-x7tvv" (OuterVolumeSpecName: "kube-api-access-x7tvv") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "kube-api-access-x7tvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.163299 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.165535 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee27437f-db20-4337-813d-aaa57c3a95d5-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.167873 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "ee27437f-db20-4337-813d-aaa57c3a95d5" (UID: "ee27437f-db20-4337-813d-aaa57c3a95d5"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250710 4852 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ee27437f-db20-4337-813d-aaa57c3a95d5-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250750 4852 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250765 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7tvv\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-kube-api-access-x7tvv\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250776 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250817 4852 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250827 4852 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ee27437f-db20-4337-813d-aaa57c3a95d5-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.250838 4852 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ee27437f-db20-4337-813d-aaa57c3a95d5-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.707020 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" event={"ID":"ee27437f-db20-4337-813d-aaa57c3a95d5","Type":"ContainerDied","Data":"fa69c46a25e23ab97b5fac4a0c7606dd0d6d67d1c900465ea590cb5321591d5e"} Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.707069 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tf7qq" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.707097 4852 scope.go:117] "RemoveContainer" containerID="f34307266f43e050fadb9eadd23785daf8113f78d6ae7c54ee9b4e748e8f3ce4" Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.727160 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tf7qq"] Jan 29 10:48:11 crc kubenswrapper[4852]: I0129 10:48:11.730965 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tf7qq"] Jan 29 10:48:13 crc kubenswrapper[4852]: I0129 10:48:13.470511 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee27437f-db20-4337-813d-aaa57c3a95d5" path="/var/lib/kubelet/pods/ee27437f-db20-4337-813d-aaa57c3a95d5/volumes" Jan 29 10:48:30 crc kubenswrapper[4852]: I0129 10:48:30.017088 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:48:30 crc kubenswrapper[4852]: I0129 10:48:30.017483 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:49:00 crc kubenswrapper[4852]: I0129 10:49:00.017552 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:49:00 crc kubenswrapper[4852]: I0129 10:49:00.018192 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:49:00 crc kubenswrapper[4852]: I0129 10:49:00.018248 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:49:00 crc kubenswrapper[4852]: I0129 10:49:00.018930 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f3e984f53a601ed918e6d663c7eafe23551b0d7d2b79b683f5bc8295ed6d785d"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 10:49:00 crc kubenswrapper[4852]: I0129 10:49:00.018989 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://f3e984f53a601ed918e6d663c7eafe23551b0d7d2b79b683f5bc8295ed6d785d" gracePeriod=600 Jan 29 10:49:01 crc kubenswrapper[4852]: I0129 10:49:01.001274 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="f3e984f53a601ed918e6d663c7eafe23551b0d7d2b79b683f5bc8295ed6d785d" exitCode=0 Jan 29 10:49:01 crc kubenswrapper[4852]: I0129 10:49:01.001404 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"f3e984f53a601ed918e6d663c7eafe23551b0d7d2b79b683f5bc8295ed6d785d"} Jan 29 10:49:01 crc kubenswrapper[4852]: I0129 10:49:01.002346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"2b7a5150bf5ab624a6ac763355fa0b8d32e9873f889978a385a8f1865aad054e"} Jan 29 10:49:01 crc kubenswrapper[4852]: I0129 10:49:01.002382 4852 scope.go:117] "RemoveContainer" containerID="fa6ac3a3bac40de941a854b029bf11995568bd29c90c7918edbb632836571a25" Jan 29 10:51:00 crc kubenswrapper[4852]: I0129 10:51:00.017652 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:51:00 crc kubenswrapper[4852]: I0129 10:51:00.018167 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:51:30 crc kubenswrapper[4852]: I0129 10:51:30.017231 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:51:30 crc kubenswrapper[4852]: I0129 10:51:30.017831 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:52:00 crc kubenswrapper[4852]: I0129 10:52:00.016995 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:52:00 crc kubenswrapper[4852]: I0129 10:52:00.017768 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:52:00 crc kubenswrapper[4852]: I0129 10:52:00.017833 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:52:00 crc kubenswrapper[4852]: I0129 10:52:00.018697 4852 kuberuntime_manager.go:1027] "Message for 
Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2b7a5150bf5ab624a6ac763355fa0b8d32e9873f889978a385a8f1865aad054e"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 10:52:00 crc kubenswrapper[4852]: I0129 10:52:00.018796 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://2b7a5150bf5ab624a6ac763355fa0b8d32e9873f889978a385a8f1865aad054e" gracePeriod=600 Jan 29 10:52:01 crc kubenswrapper[4852]: I0129 10:52:01.112685 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="2b7a5150bf5ab624a6ac763355fa0b8d32e9873f889978a385a8f1865aad054e" exitCode=0 Jan 29 10:52:01 crc kubenswrapper[4852]: I0129 10:52:01.112796 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"2b7a5150bf5ab624a6ac763355fa0b8d32e9873f889978a385a8f1865aad054e"} Jan 29 10:52:01 crc kubenswrapper[4852]: I0129 10:52:01.113239 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"756798b6b62af6c0bb8f39d162b21805a228a82cc20b150cf9cff4c9ad06408c"} Jan 29 10:52:01 crc kubenswrapper[4852]: I0129 10:52:01.113290 4852 scope.go:117] "RemoveContainer" containerID="f3e984f53a601ed918e6d663c7eafe23551b0d7d2b79b683f5bc8295ed6d785d" Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.885142 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-22xhj"] Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886256 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-controller" containerID="cri-o://785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886304 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886337 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-node" containerID="cri-o://d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886384 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="northd" containerID="cri-o://c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886392 4852 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-acl-logging" containerID="cri-o://bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886315 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="nbdb" containerID="cri-o://c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.886452 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="sbdb" containerID="cri-o://9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" gracePeriod=30 Jan 29 10:52:51 crc kubenswrapper[4852]: I0129 10:52:51.923894 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" containerID="cri-o://c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" gracePeriod=30 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.223433 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/3.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.226505 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovn-acl-logging/0.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.227183 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovn-controller/0.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.227665 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282120 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-b7nw7"] Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282321 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282337 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282347 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282354 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282360 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282366 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282375 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="sbdb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282380 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="sbdb" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282390 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-acl-logging" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282395 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-acl-logging" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282401 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282407 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282413 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282418 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282426 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282431 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282441 4852 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="ee27437f-db20-4337-813d-aaa57c3a95d5" containerName="registry" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282446 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee27437f-db20-4337-813d-aaa57c3a95d5" containerName="registry" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282456 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kubecfg-setup" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282462 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kubecfg-setup" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282472 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="northd" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282477 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="northd" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282487 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-node" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282496 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-node" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282524 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="nbdb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282529 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="nbdb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282630 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-acl-logging" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282638 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee27437f-db20-4337-813d-aaa57c3a95d5" containerName="registry" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282647 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282655 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="sbdb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282664 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282670 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="nbdb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282678 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282686 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovn-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282693 4852 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-node" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282702 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282709 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="northd" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.282795 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282801 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.282879 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.283039 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerName="ovnkube-controller" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.284423 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339367 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-kubelet\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339430 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsng4\" (UniqueName: \"kubernetes.io/projected/2e44156c-fa1a-4edf-a317-e63b96f7aae4-kube-api-access-qsng4\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339446 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-openvswitch\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339465 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339520 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-config\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339523 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-kubelet" 
(OuterVolumeSpecName: "host-kubelet") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339545 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-systemd-units\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339567 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339577 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339626 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339575 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-bin\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339653 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339671 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-netns\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339694 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339732 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-ovn\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339759 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovn-node-metrics-cert\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339777 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-script-lib\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339818 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-log-socket\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340319 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-env-overrides\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339814 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.339857 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-log-socket" (OuterVolumeSpecName: "log-socket") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340022 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340253 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340432 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-node-log\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340454 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-var-lib-openvswitch\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340481 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-etc-openvswitch\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340534 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340532 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340499 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-slash\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340559 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-node-log" (OuterVolumeSpecName: "node-log") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340566 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-netd\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340607 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-slash" (OuterVolumeSpecName: "host-slash") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340633 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-systemd\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340645 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340664 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-ovn-kubernetes\") pod \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\" (UID: \"2e44156c-fa1a-4edf-a317-e63b96f7aae4\") " Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.340809 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341017 4852 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341043 4852 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341057 4852 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341070 4852 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341083 4852 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-log-socket\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341094 4852 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-node-log\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341106 4852 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 
10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341118 4852 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341128 4852 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-slash\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341140 4852 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341152 4852 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341164 4852 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341176 4852 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341187 4852 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341200 4852 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341274 4852 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.341547 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.344240 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.344480 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e44156c-fa1a-4edf-a317-e63b96f7aae4-kube-api-access-qsng4" (OuterVolumeSpecName: "kube-api-access-qsng4") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "kube-api-access-qsng4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.351859 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "2e44156c-fa1a-4edf-a317-e63b96f7aae4" (UID: "2e44156c-fa1a-4edf-a317-e63b96f7aae4"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.434504 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/2.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.435086 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/1.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.435192 4852 generic.go:334] "Generic (PLEG): container finished" podID="80701ea9-a994-4a9f-8291-e3e40decfeda" containerID="4641556984f4546183c797a0a6201212a965b94e1e22438c8943b95acf3d17ec" exitCode=2 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.435327 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerDied","Data":"4641556984f4546183c797a0a6201212a965b94e1e22438c8943b95acf3d17ec"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.435421 4852 scope.go:117] "RemoveContainer" containerID="ee55d860aeb0d5ae4f8da0a3c04cd897edff43714c7901303c3f30f0518014a1" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.436448 4852 scope.go:117] "RemoveContainer" containerID="4641556984f4546183c797a0a6201212a965b94e1e22438c8943b95acf3d17ec" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.440112 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovnkube-controller/3.log" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.440435 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-sd8vh_openshift-multus(80701ea9-a994-4a9f-8291-e3e40decfeda)\"" pod="openshift-multus/multus-sd8vh" podUID="80701ea9-a994-4a9f-8291-e3e40decfeda" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.441993 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-kubelet\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442028 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-node-log\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442045 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-slash\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442065 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-run-ovn-kubernetes\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442087 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-systemd\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442109 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovn-node-metrics-cert\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442127 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-cni-netd\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442174 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovnkube-config\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442204 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442224 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-systemd-units\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442246 
4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-var-lib-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442269 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfpqr\" (UniqueName: \"kubernetes.io/projected/a13183b6-8dbb-4db4-8dd2-397aa5669824-kube-api-access-bfpqr\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442287 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442316 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-etc-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442340 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-env-overrides\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442360 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-cni-bin\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442382 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-log-socket\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442406 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-run-netns\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442425 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovnkube-script-lib\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442450 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-ovn\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442499 4852 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e44156c-fa1a-4edf-a317-e63b96f7aae4-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442515 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsng4\" (UniqueName: \"kubernetes.io/projected/2e44156c-fa1a-4edf-a317-e63b96f7aae4-kube-api-access-qsng4\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442530 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e44156c-fa1a-4edf-a317-e63b96f7aae4-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.442541 4852 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e44156c-fa1a-4edf-a317-e63b96f7aae4-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.443876 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovn-acl-logging/0.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.444745 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-22xhj_2e44156c-fa1a-4edf-a317-e63b96f7aae4/ovn-controller/0.log" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445094 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" exitCode=0 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445120 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" exitCode=0 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445128 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" exitCode=0 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445138 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" exitCode=0 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445146 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" exitCode=0 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445154 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" exitCode=0 Jan 29 10:52:52 crc 
kubenswrapper[4852]: I0129 10:52:52.445163 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" exitCode=143 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445171 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" exitCode=143 Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445190 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445218 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445231 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445244 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445258 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445270 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445293 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445305 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445313 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445320 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445326 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445333 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445339 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445346 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445351 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445356 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445363 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445371 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445377 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445382 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445387 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445392 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445397 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445402 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445407 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445412 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445417 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445425 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445432 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445438 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445443 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445449 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445454 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445460 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445465 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445470 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445475 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445481 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445487 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" event={"ID":"2e44156c-fa1a-4edf-a317-e63b96f7aae4","Type":"ContainerDied","Data":"636f954c5f1d9e5c4fe326369f1fffb1156d9c3c44fe05eda3d122d77ceb688c"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445495 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445500 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445505 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445510 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445515 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445519 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445524 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445528 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445534 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445540 4852 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.445639 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-22xhj" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.482688 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-22xhj"] Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.484283 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-22xhj"] Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.520886 4852 scope.go:117] "RemoveContainer" containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.539661 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544167 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovnkube-config\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544234 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544263 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-var-lib-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544289 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-systemd-units\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544304 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544320 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfpqr\" (UniqueName: \"kubernetes.io/projected/a13183b6-8dbb-4db4-8dd2-397aa5669824-kube-api-access-bfpqr\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544353 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-etc-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 
crc kubenswrapper[4852]: I0129 10:52:52.544370 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-cni-bin\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-env-overrides\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544399 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-log-socket\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544416 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-run-netns\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544433 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovnkube-script-lib\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544462 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-ovn\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544491 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-node-log\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544507 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-kubelet\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544519 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-slash\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-run-ovn-kubernetes\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544546 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-systemd\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544561 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovn-node-metrics-cert\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544576 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-cni-netd\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.544673 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-cni-netd\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.545308 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovnkube-config\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.545496 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.545530 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-var-lib-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.545554 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-systemd-units\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.545574 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546368 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-etc-openvswitch\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546404 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-cni-bin\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546875 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-node-log\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546915 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-systemd\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546937 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-log-socket\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546934 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-run-ovn-kubernetes\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.546975 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-kubelet\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.547024 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-run-netns\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.547044 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-run-ovn\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.547062 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a13183b6-8dbb-4db4-8dd2-397aa5669824-host-slash\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.547821 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovnkube-script-lib\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.547934 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a13183b6-8dbb-4db4-8dd2-397aa5669824-env-overrides\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.550122 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a13183b6-8dbb-4db4-8dd2-397aa5669824-ovn-node-metrics-cert\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.563315 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfpqr\" (UniqueName: \"kubernetes.io/projected/a13183b6-8dbb-4db4-8dd2-397aa5669824-kube-api-access-bfpqr\") pod \"ovnkube-node-b7nw7\" (UID: \"a13183b6-8dbb-4db4-8dd2-397aa5669824\") " pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.565491 4852 scope.go:117] "RemoveContainer" containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.579525 4852 scope.go:117] "RemoveContainer" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.596909 4852 scope.go:117] "RemoveContainer" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.598286 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.613832 4852 scope.go:117] "RemoveContainer" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.628987 4852 scope.go:117] "RemoveContainer" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.645471 4852 scope.go:117] "RemoveContainer" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.659286 4852 scope.go:117] "RemoveContainer" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.676773 4852 scope.go:117] "RemoveContainer" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.689299 4852 scope.go:117] "RemoveContainer" containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.689872 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": container with ID starting with c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba not found: ID does not exist" containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.689910 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} err="failed to get container status \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": rpc error: code = NotFound desc = could not find container \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": container with ID starting with c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.689940 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.690203 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": container with ID starting with 213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7 not found: ID does not exist" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.690242 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} err="failed to get container status \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": rpc error: code = NotFound desc = could not find container \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": container with ID starting with 213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.690255 4852 scope.go:117] "RemoveContainer" 
containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.690554 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": container with ID starting with 9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2 not found: ID does not exist" containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.690601 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} err="failed to get container status \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": rpc error: code = NotFound desc = could not find container \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": container with ID starting with 9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.690613 4852 scope.go:117] "RemoveContainer" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.690806 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": container with ID starting with c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0 not found: ID does not exist" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.690828 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} err="failed to get container status \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": rpc error: code = NotFound desc = could not find container \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": container with ID starting with c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.690846 4852 scope.go:117] "RemoveContainer" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.691034 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": container with ID starting with c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb not found: ID does not exist" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691066 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} err="failed to get container status \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": rpc error: code = NotFound desc = could not find container \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": container with ID starting with 
c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691079 4852 scope.go:117] "RemoveContainer" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.691296 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": container with ID starting with 9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421 not found: ID does not exist" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691321 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} err="failed to get container status \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": rpc error: code = NotFound desc = could not find container \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": container with ID starting with 9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691335 4852 scope.go:117] "RemoveContainer" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.691522 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": container with ID starting with d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828 not found: ID does not exist" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691561 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} err="failed to get container status \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": rpc error: code = NotFound desc = could not find container \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": container with ID starting with d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691575 4852 scope.go:117] "RemoveContainer" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.691779 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": container with ID starting with bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d not found: ID does not exist" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691806 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} err="failed to get container status \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": rpc 
error: code = NotFound desc = could not find container \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": container with ID starting with bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.691819 4852 scope.go:117] "RemoveContainer" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.691993 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": container with ID starting with 785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957 not found: ID does not exist" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692025 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} err="failed to get container status \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": rpc error: code = NotFound desc = could not find container \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": container with ID starting with 785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692038 4852 scope.go:117] "RemoveContainer" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" Jan 29 10:52:52 crc kubenswrapper[4852]: E0129 10:52:52.692198 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": container with ID starting with 7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c not found: ID does not exist" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692219 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} err="failed to get container status \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": rpc error: code = NotFound desc = could not find container \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": container with ID starting with 7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692232 4852 scope.go:117] "RemoveContainer" containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692408 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} err="failed to get container status \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": rpc error: code = NotFound desc = could not find container \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": container with ID starting with c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 
10:52:52.692431 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692674 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} err="failed to get container status \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": rpc error: code = NotFound desc = could not find container \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": container with ID starting with 213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692692 4852 scope.go:117] "RemoveContainer" containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692865 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} err="failed to get container status \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": rpc error: code = NotFound desc = could not find container \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": container with ID starting with 9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.692883 4852 scope.go:117] "RemoveContainer" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693113 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} err="failed to get container status \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": rpc error: code = NotFound desc = could not find container \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": container with ID starting with c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693132 4852 scope.go:117] "RemoveContainer" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693318 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} err="failed to get container status \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": rpc error: code = NotFound desc = could not find container \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": container with ID starting with c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693336 4852 scope.go:117] "RemoveContainer" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693503 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} err="failed to get container status 
\"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": rpc error: code = NotFound desc = could not find container \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": container with ID starting with 9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693531 4852 scope.go:117] "RemoveContainer" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693733 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} err="failed to get container status \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": rpc error: code = NotFound desc = could not find container \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": container with ID starting with d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693754 4852 scope.go:117] "RemoveContainer" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693927 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} err="failed to get container status \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": rpc error: code = NotFound desc = could not find container \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": container with ID starting with bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.693946 4852 scope.go:117] "RemoveContainer" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694159 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} err="failed to get container status \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": rpc error: code = NotFound desc = could not find container \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": container with ID starting with 785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694177 4852 scope.go:117] "RemoveContainer" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694339 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} err="failed to get container status \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": rpc error: code = NotFound desc = could not find container \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": container with ID starting with 7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694358 4852 scope.go:117] "RemoveContainer" 
containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694524 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} err="failed to get container status \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": rpc error: code = NotFound desc = could not find container \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": container with ID starting with c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694541 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694739 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} err="failed to get container status \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": rpc error: code = NotFound desc = could not find container \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": container with ID starting with 213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.694762 4852 scope.go:117] "RemoveContainer" containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695016 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} err="failed to get container status \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": rpc error: code = NotFound desc = could not find container \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": container with ID starting with 9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695034 4852 scope.go:117] "RemoveContainer" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695214 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} err="failed to get container status \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": rpc error: code = NotFound desc = could not find container \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": container with ID starting with c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695251 4852 scope.go:117] "RemoveContainer" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695547 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} err="failed to get container status \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": rpc error: code = NotFound desc = could not find 
container \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": container with ID starting with c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695607 4852 scope.go:117] "RemoveContainer" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695894 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} err="failed to get container status \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": rpc error: code = NotFound desc = could not find container \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": container with ID starting with 9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.695921 4852 scope.go:117] "RemoveContainer" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696102 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} err="failed to get container status \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": rpc error: code = NotFound desc = could not find container \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": container with ID starting with d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696124 4852 scope.go:117] "RemoveContainer" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696291 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} err="failed to get container status \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": rpc error: code = NotFound desc = could not find container \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": container with ID starting with bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696313 4852 scope.go:117] "RemoveContainer" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696480 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} err="failed to get container status \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": rpc error: code = NotFound desc = could not find container \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": container with ID starting with 785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696499 4852 scope.go:117] "RemoveContainer" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696689 4852 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} err="failed to get container status \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": rpc error: code = NotFound desc = could not find container \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": container with ID starting with 7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696709 4852 scope.go:117] "RemoveContainer" containerID="c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696877 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba"} err="failed to get container status \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": rpc error: code = NotFound desc = could not find container \"c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba\": container with ID starting with c35da32d116cbf2ecad3ccbe50cb7a9e22ef65cb62fc8c3dad32150e564455ba not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.696918 4852 scope.go:117] "RemoveContainer" containerID="213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697103 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7"} err="failed to get container status \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": rpc error: code = NotFound desc = could not find container \"213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7\": container with ID starting with 213967db1b7a6a3bb6581f0f03adfeb495743b5339bc990bf39c7725925bfbc7 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697123 4852 scope.go:117] "RemoveContainer" containerID="9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697291 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2"} err="failed to get container status \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": rpc error: code = NotFound desc = could not find container \"9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2\": container with ID starting with 9bfc45aadeefeb86e582c1bf977143dc2248dddf82a048952231be6657502cb2 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697309 4852 scope.go:117] "RemoveContainer" containerID="c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697485 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0"} err="failed to get container status \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": rpc error: code = NotFound desc = could not find container \"c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0\": container with ID starting with 
c0b1b30ab9af4eed0c90bd79141769a051f43ac922661341df50da822c740dc0 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697503 4852 scope.go:117] "RemoveContainer" containerID="c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697685 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb"} err="failed to get container status \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": rpc error: code = NotFound desc = could not find container \"c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb\": container with ID starting with c03313586f2f50d9516df304818aeb27b80e50ba59f1d1856d9bac4142236deb not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697704 4852 scope.go:117] "RemoveContainer" containerID="9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697867 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421"} err="failed to get container status \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": rpc error: code = NotFound desc = could not find container \"9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421\": container with ID starting with 9d4588eecb9da3c9359e381a708a070a681595f34d60b475b5c5f85f39e16421 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.697887 4852 scope.go:117] "RemoveContainer" containerID="d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698054 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828"} err="failed to get container status \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": rpc error: code = NotFound desc = could not find container \"d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828\": container with ID starting with d6e3d5af8e3aa5d5a942f326a55585e77c39e4af10496280eddad3344f109828 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698079 4852 scope.go:117] "RemoveContainer" containerID="bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698282 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d"} err="failed to get container status \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": rpc error: code = NotFound desc = could not find container \"bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d\": container with ID starting with bf398580ed019fccb2d65dee4c6bd26db192ac06fbb7c7d44e7687bd680f502d not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698305 4852 scope.go:117] "RemoveContainer" containerID="785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698504 4852 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957"} err="failed to get container status \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": rpc error: code = NotFound desc = could not find container \"785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957\": container with ID starting with 785dfe8ca9f80e652cf85bded16e52c3b8bd3507f705ff7a489519b61da22957 not found: ID does not exist" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698523 4852 scope.go:117] "RemoveContainer" containerID="7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c" Jan 29 10:52:52 crc kubenswrapper[4852]: I0129 10:52:52.698747 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c"} err="failed to get container status \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": rpc error: code = NotFound desc = could not find container \"7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c\": container with ID starting with 7ee00f3a834b356dd261079e5c1f673b22747f866da5f316eb51b2b884ca624c not found: ID does not exist" Jan 29 10:52:53 crc kubenswrapper[4852]: I0129 10:52:53.453107 4852 generic.go:334] "Generic (PLEG): container finished" podID="a13183b6-8dbb-4db4-8dd2-397aa5669824" containerID="d043a2c7fd0c0208fc8018ea67b90468ab008dcbac1b6b2f9ac8324a13b495b4" exitCode=0 Jan 29 10:52:53 crc kubenswrapper[4852]: I0129 10:52:53.453406 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerDied","Data":"d043a2c7fd0c0208fc8018ea67b90468ab008dcbac1b6b2f9ac8324a13b495b4"} Jan 29 10:52:53 crc kubenswrapper[4852]: I0129 10:52:53.453434 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"9acde0c2b4c127bc551289944106f10fe156ed2c32e17983508d4ff1c5517899"} Jan 29 10:52:53 crc kubenswrapper[4852]: I0129 10:52:53.457202 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/2.log" Jan 29 10:52:53 crc kubenswrapper[4852]: I0129 10:52:53.487416 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e44156c-fa1a-4edf-a317-e63b96f7aae4" path="/var/lib/kubelet/pods/2e44156c-fa1a-4edf-a317-e63b96f7aae4/volumes" Jan 29 10:52:54 crc kubenswrapper[4852]: I0129 10:52:54.475482 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"78663e88b443ee23cd0db5ab39ac0e6ba3c4a9b05f7f2460c0e70db36d33cf2e"} Jan 29 10:52:54 crc kubenswrapper[4852]: I0129 10:52:54.475981 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"2ca1f9953a5918a29461295adc5eeb25787b1a4dc9c87bad6d6ba19664e791c3"} Jan 29 10:52:54 crc kubenswrapper[4852]: I0129 10:52:54.475994 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"62c152216be2abd131e614b9a8e1fbaafc1a59d2e52fddc8b6032f614659cb18"} Jan 
29 10:52:54 crc kubenswrapper[4852]: I0129 10:52:54.476003 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"4eb358e91fefedc897d35e35ca3c3e0722d777090a6fd8c4e03b1768aa58112b"} Jan 29 10:52:54 crc kubenswrapper[4852]: I0129 10:52:54.476011 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"cc82bb9fde045402bb10d2e012ee9def15ea9a5e816e275eacc533780409c02f"} Jan 29 10:52:54 crc kubenswrapper[4852]: I0129 10:52:54.476019 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"12bdf0362ca982bae2f44478f1627ab78338cd4925fef9672a2f5ad7739ae157"} Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.489094 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"e47e09ddffb580b48de23c43797113faff720f3923478261a9fba055fc06bad5"} Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.746053 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-mprg5"] Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.746738 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.749290 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.749522 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.749684 4852 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-4ld2w" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.751949 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.799644 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdj6j\" (UniqueName: \"kubernetes.io/projected/79b0e648-79cf-41be-82ff-1850c3dd519d-kube-api-access-tdj6j\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.799732 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/79b0e648-79cf-41be-82ff-1850c3dd519d-crc-storage\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.799773 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/79b0e648-79cf-41be-82ff-1850c3dd519d-node-mnt\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.900860 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/79b0e648-79cf-41be-82ff-1850c3dd519d-node-mnt\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.900948 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdj6j\" (UniqueName: \"kubernetes.io/projected/79b0e648-79cf-41be-82ff-1850c3dd519d-kube-api-access-tdj6j\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.901001 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/79b0e648-79cf-41be-82ff-1850c3dd519d-crc-storage\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.901282 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/79b0e648-79cf-41be-82ff-1850c3dd519d-node-mnt\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.901814 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/79b0e648-79cf-41be-82ff-1850c3dd519d-crc-storage\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:56 crc kubenswrapper[4852]: I0129 10:52:56.918967 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdj6j\" (UniqueName: \"kubernetes.io/projected/79b0e648-79cf-41be-82ff-1850c3dd519d-kube-api-access-tdj6j\") pod \"crc-storage-crc-mprg5\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:57 crc kubenswrapper[4852]: I0129 10:52:57.062872 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:57 crc kubenswrapper[4852]: E0129 10:52:57.100402 4852 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(d8b9efbbb7300dbccd16f752de51816c35507bad0cb4e401e3e009ca5a1c3e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 10:52:57 crc kubenswrapper[4852]: E0129 10:52:57.100548 4852 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(d8b9efbbb7300dbccd16f752de51816c35507bad0cb4e401e3e009ca5a1c3e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:57 crc kubenswrapper[4852]: E0129 10:52:57.100629 4852 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(d8b9efbbb7300dbccd16f752de51816c35507bad0cb4e401e3e009ca5a1c3e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:57 crc kubenswrapper[4852]: E0129 10:52:57.100713 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-mprg5_crc-storage(79b0e648-79cf-41be-82ff-1850c3dd519d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-mprg5_crc-storage(79b0e648-79cf-41be-82ff-1850c3dd519d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(d8b9efbbb7300dbccd16f752de51816c35507bad0cb4e401e3e009ca5a1c3e8b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-mprg5" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.291355 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-mprg5"] Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.292034 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.292491 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:59 crc kubenswrapper[4852]: E0129 10:52:59.321623 4852 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(77e9093786cac4e730d73ee48746c808d075bea8c8e50ec3eeaeabe458cc9f77): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 10:52:59 crc kubenswrapper[4852]: E0129 10:52:59.321693 4852 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(77e9093786cac4e730d73ee48746c808d075bea8c8e50ec3eeaeabe458cc9f77): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:59 crc kubenswrapper[4852]: E0129 10:52:59.321726 4852 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(77e9093786cac4e730d73ee48746c808d075bea8c8e50ec3eeaeabe458cc9f77): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:52:59 crc kubenswrapper[4852]: E0129 10:52:59.321783 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-mprg5_crc-storage(79b0e648-79cf-41be-82ff-1850c3dd519d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-mprg5_crc-storage(79b0e648-79cf-41be-82ff-1850c3dd519d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(77e9093786cac4e730d73ee48746c808d075bea8c8e50ec3eeaeabe458cc9f77): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-mprg5" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.506727 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" event={"ID":"a13183b6-8dbb-4db4-8dd2-397aa5669824","Type":"ContainerStarted","Data":"965e4d858cf42c785d55adf97f7c3f53a9ac0470849fcade59a38ddac5ff0675"} Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.507728 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.507755 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.507800 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.535182 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.540525 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" podStartSLOduration=7.540506378 podStartE2EDuration="7.540506378s" podCreationTimestamp="2026-01-29 10:52:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:52:59.537738219 +0000 UTC m=+676.755069363" watchObservedRunningTime="2026-01-29 10:52:59.540506378 +0000 UTC m=+676.757837512" Jan 29 10:52:59 crc kubenswrapper[4852]: I0129 10:52:59.542484 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:53:03 crc kubenswrapper[4852]: I0129 10:53:03.466625 4852 scope.go:117] "RemoveContainer" containerID="4641556984f4546183c797a0a6201212a965b94e1e22438c8943b95acf3d17ec" Jan 29 10:53:03 crc kubenswrapper[4852]: E0129 10:53:03.467184 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-sd8vh_openshift-multus(80701ea9-a994-4a9f-8291-e3e40decfeda)\"" pod="openshift-multus/multus-sd8vh" podUID="80701ea9-a994-4a9f-8291-e3e40decfeda" Jan 29 10:53:12 crc kubenswrapper[4852]: I0129 10:53:12.463016 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:12 crc kubenswrapper[4852]: I0129 10:53:12.464064 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:12 crc kubenswrapper[4852]: E0129 10:53:12.486571 4852 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(2d3b00a50ef0504438cb41755021fb7b567aec816e02bdb87644f11c36d41351): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 10:53:12 crc kubenswrapper[4852]: E0129 10:53:12.486738 4852 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(2d3b00a50ef0504438cb41755021fb7b567aec816e02bdb87644f11c36d41351): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:12 crc kubenswrapper[4852]: E0129 10:53:12.486822 4852 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(2d3b00a50ef0504438cb41755021fb7b567aec816e02bdb87644f11c36d41351): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:12 crc kubenswrapper[4852]: E0129 10:53:12.486892 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-mprg5_crc-storage(79b0e648-79cf-41be-82ff-1850c3dd519d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-mprg5_crc-storage(79b0e648-79cf-41be-82ff-1850c3dd519d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-mprg5_crc-storage_79b0e648-79cf-41be-82ff-1850c3dd519d_0(2d3b00a50ef0504438cb41755021fb7b567aec816e02bdb87644f11c36d41351): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-mprg5" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" Jan 29 10:53:18 crc kubenswrapper[4852]: I0129 10:53:18.463700 4852 scope.go:117] "RemoveContainer" containerID="4641556984f4546183c797a0a6201212a965b94e1e22438c8943b95acf3d17ec" Jan 29 10:53:19 crc kubenswrapper[4852]: I0129 10:53:19.621509 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd8vh_80701ea9-a994-4a9f-8291-e3e40decfeda/kube-multus/2.log" Jan 29 10:53:19 crc kubenswrapper[4852]: I0129 10:53:19.621825 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd8vh" event={"ID":"80701ea9-a994-4a9f-8291-e3e40decfeda","Type":"ContainerStarted","Data":"32996a386df60fdb600442c940060cd301d21b4a870aaf567cce378c9ed93d46"} Jan 29 10:53:22 crc kubenswrapper[4852]: I0129 10:53:22.636946 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-b7nw7" Jan 29 10:53:27 crc kubenswrapper[4852]: I0129 10:53:27.462912 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:27 crc kubenswrapper[4852]: I0129 10:53:27.463647 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:27 crc kubenswrapper[4852]: I0129 10:53:27.659126 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-mprg5"] Jan 29 10:53:27 crc kubenswrapper[4852]: I0129 10:53:27.670078 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 10:53:28 crc kubenswrapper[4852]: I0129 10:53:28.668475 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mprg5" event={"ID":"79b0e648-79cf-41be-82ff-1850c3dd519d","Type":"ContainerStarted","Data":"b67154cd8c47f35360479a4b304ac5925c3f1023c5206a690f4b671e60491dfd"} Jan 29 10:53:29 crc kubenswrapper[4852]: I0129 10:53:29.675935 4852 generic.go:334] "Generic (PLEG): container finished" podID="79b0e648-79cf-41be-82ff-1850c3dd519d" containerID="11456debc4eb2650e6db68c2991e53f1c5827ebfcc4a4d2dab561ab0bd5fa57c" exitCode=0 Jan 29 10:53:29 crc kubenswrapper[4852]: I0129 10:53:29.676354 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mprg5" event={"ID":"79b0e648-79cf-41be-82ff-1850c3dd519d","Type":"ContainerDied","Data":"11456debc4eb2650e6db68c2991e53f1c5827ebfcc4a4d2dab561ab0bd5fa57c"} Jan 29 10:53:30 crc kubenswrapper[4852]: I0129 10:53:30.971808 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.011140 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/79b0e648-79cf-41be-82ff-1850c3dd519d-crc-storage\") pod \"79b0e648-79cf-41be-82ff-1850c3dd519d\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.011297 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdj6j\" (UniqueName: \"kubernetes.io/projected/79b0e648-79cf-41be-82ff-1850c3dd519d-kube-api-access-tdj6j\") pod \"79b0e648-79cf-41be-82ff-1850c3dd519d\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.011319 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/79b0e648-79cf-41be-82ff-1850c3dd519d-node-mnt\") pod \"79b0e648-79cf-41be-82ff-1850c3dd519d\" (UID: \"79b0e648-79cf-41be-82ff-1850c3dd519d\") " Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.011566 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/79b0e648-79cf-41be-82ff-1850c3dd519d-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "79b0e648-79cf-41be-82ff-1850c3dd519d" (UID: "79b0e648-79cf-41be-82ff-1850c3dd519d"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.016685 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b0e648-79cf-41be-82ff-1850c3dd519d-kube-api-access-tdj6j" (OuterVolumeSpecName: "kube-api-access-tdj6j") pod "79b0e648-79cf-41be-82ff-1850c3dd519d" (UID: "79b0e648-79cf-41be-82ff-1850c3dd519d"). InnerVolumeSpecName "kube-api-access-tdj6j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.024526 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79b0e648-79cf-41be-82ff-1850c3dd519d-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "79b0e648-79cf-41be-82ff-1850c3dd519d" (UID: "79b0e648-79cf-41be-82ff-1850c3dd519d"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.112822 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdj6j\" (UniqueName: \"kubernetes.io/projected/79b0e648-79cf-41be-82ff-1850c3dd519d-kube-api-access-tdj6j\") on node \"crc\" DevicePath \"\"" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.112870 4852 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/79b0e648-79cf-41be-82ff-1850c3dd519d-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.112890 4852 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/79b0e648-79cf-41be-82ff-1850c3dd519d-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.689512 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-mprg5" event={"ID":"79b0e648-79cf-41be-82ff-1850c3dd519d","Type":"ContainerDied","Data":"b67154cd8c47f35360479a4b304ac5925c3f1023c5206a690f4b671e60491dfd"} Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.689895 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b67154cd8c47f35360479a4b304ac5925c3f1023c5206a690f4b671e60491dfd" Jan 29 10:53:31 crc kubenswrapper[4852]: I0129 10:53:31.689540 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-mprg5" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.734819 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx"] Jan 29 10:53:37 crc kubenswrapper[4852]: E0129 10:53:37.735418 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" containerName="storage" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.735440 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" containerName="storage" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.735622 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" containerName="storage" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.736500 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.739742 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.744915 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx"] Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.895572 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.895651 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmhmw\" (UniqueName: \"kubernetes.io/projected/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-kube-api-access-hmhmw\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.895717 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.996790 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.997265 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmhmw\" (UniqueName: \"kubernetes.io/projected/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-kube-api-access-hmhmw\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.997385 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.997753 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:37 crc kubenswrapper[4852]: I0129 10:53:37.997881 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:38 crc kubenswrapper[4852]: I0129 10:53:38.017815 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmhmw\" (UniqueName: \"kubernetes.io/projected/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-kube-api-access-hmhmw\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:38 crc kubenswrapper[4852]: I0129 10:53:38.100033 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:38 crc kubenswrapper[4852]: I0129 10:53:38.301506 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx"] Jan 29 10:53:38 crc kubenswrapper[4852]: I0129 10:53:38.729666 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" event={"ID":"e7cd1841-eae4-4fe0-ac76-87a98e2118a9","Type":"ContainerStarted","Data":"6e9845ed925fbfe653af0a4a93d1d7bb8c4082de5c4d2a8a2dc9d0fd1fcce9e8"} Jan 29 10:53:38 crc kubenswrapper[4852]: I0129 10:53:38.730008 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" event={"ID":"e7cd1841-eae4-4fe0-ac76-87a98e2118a9","Type":"ContainerStarted","Data":"953b9e2a4b1d2b248060ed00bd66057ce79ed5d3538bd0d9e4764922390b7233"} Jan 29 10:53:39 crc kubenswrapper[4852]: I0129 10:53:39.737167 4852 generic.go:334] "Generic (PLEG): container finished" podID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerID="6e9845ed925fbfe653af0a4a93d1d7bb8c4082de5c4d2a8a2dc9d0fd1fcce9e8" exitCode=0 Jan 29 10:53:39 crc kubenswrapper[4852]: I0129 10:53:39.737223 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" event={"ID":"e7cd1841-eae4-4fe0-ac76-87a98e2118a9","Type":"ContainerDied","Data":"6e9845ed925fbfe653af0a4a93d1d7bb8c4082de5c4d2a8a2dc9d0fd1fcce9e8"} Jan 29 10:53:41 crc kubenswrapper[4852]: I0129 10:53:41.750218 4852 generic.go:334] "Generic (PLEG): container finished" podID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerID="540ba3672109398018223fb1c5c66a0509ec8e20d20c7ff3e8291ecafc3365e7" exitCode=0 Jan 29 10:53:41 crc kubenswrapper[4852]: I0129 10:53:41.750319 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" 
event={"ID":"e7cd1841-eae4-4fe0-ac76-87a98e2118a9","Type":"ContainerDied","Data":"540ba3672109398018223fb1c5c66a0509ec8e20d20c7ff3e8291ecafc3365e7"} Jan 29 10:53:42 crc kubenswrapper[4852]: I0129 10:53:42.758832 4852 generic.go:334] "Generic (PLEG): container finished" podID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerID="5474019620cc57aa8b2c7b4ff5c90555ff61c979d080d1fde84847bb7770a39b" exitCode=0 Jan 29 10:53:42 crc kubenswrapper[4852]: I0129 10:53:42.758890 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" event={"ID":"e7cd1841-eae4-4fe0-ac76-87a98e2118a9","Type":"ContainerDied","Data":"5474019620cc57aa8b2c7b4ff5c90555ff61c979d080d1fde84847bb7770a39b"} Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.028491 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.204396 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmhmw\" (UniqueName: \"kubernetes.io/projected/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-kube-api-access-hmhmw\") pod \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.204502 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-util\") pod \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.204670 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-bundle\") pod \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\" (UID: \"e7cd1841-eae4-4fe0-ac76-87a98e2118a9\") " Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.205385 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-bundle" (OuterVolumeSpecName: "bundle") pod "e7cd1841-eae4-4fe0-ac76-87a98e2118a9" (UID: "e7cd1841-eae4-4fe0-ac76-87a98e2118a9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.209979 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-kube-api-access-hmhmw" (OuterVolumeSpecName: "kube-api-access-hmhmw") pod "e7cd1841-eae4-4fe0-ac76-87a98e2118a9" (UID: "e7cd1841-eae4-4fe0-ac76-87a98e2118a9"). InnerVolumeSpecName "kube-api-access-hmhmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.227112 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-util" (OuterVolumeSpecName: "util") pod "e7cd1841-eae4-4fe0-ac76-87a98e2118a9" (UID: "e7cd1841-eae4-4fe0-ac76-87a98e2118a9"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.307367 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmhmw\" (UniqueName: \"kubernetes.io/projected/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-kube-api-access-hmhmw\") on node \"crc\" DevicePath \"\"" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.307449 4852 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-util\") on node \"crc\" DevicePath \"\"" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.307478 4852 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e7cd1841-eae4-4fe0-ac76-87a98e2118a9-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.771357 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" event={"ID":"e7cd1841-eae4-4fe0-ac76-87a98e2118a9","Type":"ContainerDied","Data":"953b9e2a4b1d2b248060ed00bd66057ce79ed5d3538bd0d9e4764922390b7233"} Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.771413 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="953b9e2a4b1d2b248060ed00bd66057ce79ed5d3538bd0d9e4764922390b7233" Jan 29 10:53:44 crc kubenswrapper[4852]: I0129 10:53:44.771416 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.252063 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9rpdb"] Jan 29 10:53:49 crc kubenswrapper[4852]: E0129 10:53:49.252674 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="util" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.252692 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="util" Jan 29 10:53:49 crc kubenswrapper[4852]: E0129 10:53:49.252702 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="pull" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.252710 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="pull" Jan 29 10:53:49 crc kubenswrapper[4852]: E0129 10:53:49.252730 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="extract" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.252739 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="extract" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.252874 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7cd1841-eae4-4fe0-ac76-87a98e2118a9" containerName="extract" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.253356 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.256045 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-cjtg5" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.256388 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.256479 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.261930 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9rpdb"] Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.368237 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9xds\" (UniqueName: \"kubernetes.io/projected/1453c2c3-5de9-4ea5-91e1-d19133f4877d-kube-api-access-m9xds\") pod \"nmstate-operator-646758c888-9rpdb\" (UID: \"1453c2c3-5de9-4ea5-91e1-d19133f4877d\") " pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.469219 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9xds\" (UniqueName: \"kubernetes.io/projected/1453c2c3-5de9-4ea5-91e1-d19133f4877d-kube-api-access-m9xds\") pod \"nmstate-operator-646758c888-9rpdb\" (UID: \"1453c2c3-5de9-4ea5-91e1-d19133f4877d\") " pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.487185 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9xds\" (UniqueName: \"kubernetes.io/projected/1453c2c3-5de9-4ea5-91e1-d19133f4877d-kube-api-access-m9xds\") pod \"nmstate-operator-646758c888-9rpdb\" (UID: \"1453c2c3-5de9-4ea5-91e1-d19133f4877d\") " pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.567955 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.734335 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9rpdb"] Jan 29 10:53:49 crc kubenswrapper[4852]: I0129 10:53:49.802652 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" event={"ID":"1453c2c3-5de9-4ea5-91e1-d19133f4877d","Type":"ContainerStarted","Data":"9a9f2bdfd8ae91fb97e6b6697562d856b61b490bf9872261ae0c90749bf90633"} Jan 29 10:53:52 crc kubenswrapper[4852]: I0129 10:53:52.819713 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" event={"ID":"1453c2c3-5de9-4ea5-91e1-d19133f4877d","Type":"ContainerStarted","Data":"e3816de2b975e28471c7ffd26fe3288fdda8aa050ccf5a814e5d797c2c533b2c"} Jan 29 10:53:52 crc kubenswrapper[4852]: I0129 10:53:52.838846 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-9rpdb" podStartSLOduration=1.7027984059999999 podStartE2EDuration="3.838827412s" podCreationTimestamp="2026-01-29 10:53:49 +0000 UTC" firstStartedPulling="2026-01-29 10:53:49.742109141 +0000 UTC m=+726.959440275" lastFinishedPulling="2026-01-29 10:53:51.878138147 +0000 UTC m=+729.095469281" observedRunningTime="2026-01-29 10:53:52.833945241 +0000 UTC m=+730.051276385" watchObservedRunningTime="2026-01-29 10:53:52.838827412 +0000 UTC m=+730.056158566" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.850871 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-zp86n"] Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.852264 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.854026 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-xv4nj" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.859046 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-zp86n"] Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.862821 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj"] Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.863493 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.866085 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.879213 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj"] Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.895327 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-bwjjq"] Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.896073 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.973516 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-ldknj\" (UID: \"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.973602 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtpt8\" (UniqueName: \"kubernetes.io/projected/bdf996a1-0482-4e91-8a82-99a9319a4711-kube-api-access-wtpt8\") pod \"nmstate-metrics-54757c584b-zp86n\" (UID: \"bdf996a1-0482-4e91-8a82-99a9319a4711\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" Jan 29 10:53:57 crc kubenswrapper[4852]: I0129 10:53:57.973742 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vdzt\" (UniqueName: \"kubernetes.io/projected/bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0-kube-api-access-5vdzt\") pod \"nmstate-webhook-8474b5b9d8-ldknj\" (UID: \"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.018607 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj"] Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.019257 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.025783 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.025820 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-gwdtc" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.028443 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.041686 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj"] Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074594 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtpt8\" (UniqueName: \"kubernetes.io/projected/bdf996a1-0482-4e91-8a82-99a9319a4711-kube-api-access-wtpt8\") pod \"nmstate-metrics-54757c584b-zp86n\" (UID: \"bdf996a1-0482-4e91-8a82-99a9319a4711\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074644 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqlfp\" (UniqueName: \"kubernetes.io/projected/b978ce05-e091-4dc5-814d-47378f31ea22-kube-api-access-nqlfp\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074701 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-dbus-socket\") pod 
\"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074728 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-ovs-socket\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074752 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vdzt\" (UniqueName: \"kubernetes.io/projected/bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0-kube-api-access-5vdzt\") pod \"nmstate-webhook-8474b5b9d8-ldknj\" (UID: \"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074791 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-ldknj\" (UID: \"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.074816 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-nmstate-lock\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.092774 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-ldknj\" (UID: \"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.097188 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtpt8\" (UniqueName: \"kubernetes.io/projected/bdf996a1-0482-4e91-8a82-99a9319a4711-kube-api-access-wtpt8\") pod \"nmstate-metrics-54757c584b-zp86n\" (UID: \"bdf996a1-0482-4e91-8a82-99a9319a4711\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.100277 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vdzt\" (UniqueName: \"kubernetes.io/projected/bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0-kube-api-access-5vdzt\") pod \"nmstate-webhook-8474b5b9d8-ldknj\" (UID: \"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.169134 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176402 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqlfp\" (UniqueName: \"kubernetes.io/projected/b978ce05-e091-4dc5-814d-47378f31ea22-kube-api-access-nqlfp\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176462 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6ca6b7d4-7a27-4561-886b-045873d5a78a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176497 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-dbus-socket\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176524 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-ovs-socket\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176607 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca6b7d4-7a27-4561-886b-045873d5a78a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176642 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-nmstate-lock\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.176666 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8tlh\" (UniqueName: \"kubernetes.io/projected/6ca6b7d4-7a27-4561-886b-045873d5a78a-kube-api-access-f8tlh\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.177202 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-ovs-socket\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.177271 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-nmstate-lock\") pod 
\"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.177312 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b978ce05-e091-4dc5-814d-47378f31ea22-dbus-socket\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.200205 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqlfp\" (UniqueName: \"kubernetes.io/projected/b978ce05-e091-4dc5-814d-47378f31ea22-kube-api-access-nqlfp\") pod \"nmstate-handler-bwjjq\" (UID: \"b978ce05-e091-4dc5-814d-47378f31ea22\") " pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.201385 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-d7cc9b9fd-6649m"] Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.202010 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.227787 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.231171 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.235931 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-d7cc9b9fd-6649m"] Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.292800 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-config\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.292895 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca6b7d4-7a27-4561-886b-045873d5a78a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.292971 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6lkd\" (UniqueName: \"kubernetes.io/projected/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-kube-api-access-j6lkd\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.293653 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8tlh\" (UniqueName: \"kubernetes.io/projected/6ca6b7d4-7a27-4561-886b-045873d5a78a-kube-api-access-f8tlh\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.293688 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-oauth-config\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.293754 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-oauth-serving-cert\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.293787 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6ca6b7d4-7a27-4561-886b-045873d5a78a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.293811 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-serving-cert\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.293860 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-trusted-ca-bundle\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.294398 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-service-ca\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.294675 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6ca6b7d4-7a27-4561-886b-045873d5a78a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.304087 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca6b7d4-7a27-4561-886b-045873d5a78a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: \"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.317805 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8tlh\" (UniqueName: \"kubernetes.io/projected/6ca6b7d4-7a27-4561-886b-045873d5a78a-kube-api-access-f8tlh\") pod \"nmstate-console-plugin-7754f76f8b-wpgfj\" (UID: 
\"6ca6b7d4-7a27-4561-886b-045873d5a78a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.349751 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395405 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-config\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395482 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6lkd\" (UniqueName: \"kubernetes.io/projected/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-kube-api-access-j6lkd\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395514 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-oauth-config\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395616 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-oauth-serving-cert\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395639 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-serving-cert\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395658 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-trusted-ca-bundle\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.395676 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-service-ca\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.396364 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-service-ca\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.396887 4852 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-oauth-serving-cert\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.398033 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-trusted-ca-bundle\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.401006 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-oauth-config\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.401320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-serving-cert\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.413425 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6lkd\" (UniqueName: \"kubernetes.io/projected/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-kube-api-access-j6lkd\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.416963 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb-console-config\") pod \"console-d7cc9b9fd-6649m\" (UID: \"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb\") " pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.454610 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-zp86n"] Jan 29 10:53:58 crc kubenswrapper[4852]: W0129 10:53:58.463033 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbdf996a1_0482_4e91_8a82_99a9319a4711.slice/crio-ec345e3bd1ab3f903ca0ce28315894cb25c037811bb374ff81b45aa3625f6dcb WatchSource:0}: Error finding container ec345e3bd1ab3f903ca0ce28315894cb25c037811bb374ff81b45aa3625f6dcb: Status 404 returned error can't find the container with id ec345e3bd1ab3f903ca0ce28315894cb25c037811bb374ff81b45aa3625f6dcb Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.517317 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj"] Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.573011 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj"] Jan 29 10:53:58 crc kubenswrapper[4852]: W0129 10:53:58.577253 4852 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ca6b7d4_7a27_4561_886b_045873d5a78a.slice/crio-76d9e95e702575fefa87ba4eaad42cd749aaa18dafdd0d9d73bfa5c0d0e4f729 WatchSource:0}: Error finding container 76d9e95e702575fefa87ba4eaad42cd749aaa18dafdd0d9d73bfa5c0d0e4f729: Status 404 returned error can't find the container with id 76d9e95e702575fefa87ba4eaad42cd749aaa18dafdd0d9d73bfa5c0d0e4f729 Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.607270 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.778531 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-d7cc9b9fd-6649m"] Jan 29 10:53:58 crc kubenswrapper[4852]: W0129 10:53:58.785912 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod503f0fc0_1625_4f1a_b4c4_7961c5d8e1cb.slice/crio-c1106c323e22a889e88443c222c0edd9b21563307d4fb61f1ddec189951e3ec0 WatchSource:0}: Error finding container c1106c323e22a889e88443c222c0edd9b21563307d4fb61f1ddec189951e3ec0: Status 404 returned error can't find the container with id c1106c323e22a889e88443c222c0edd9b21563307d4fb61f1ddec189951e3ec0 Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.853021 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bwjjq" event={"ID":"b978ce05-e091-4dc5-814d-47378f31ea22","Type":"ContainerStarted","Data":"65904c68d745ec9fc1758fc47b26d2bbbf4aee2d2e5d999d845682af4df32adc"} Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.853819 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" event={"ID":"6ca6b7d4-7a27-4561-886b-045873d5a78a","Type":"ContainerStarted","Data":"76d9e95e702575fefa87ba4eaad42cd749aaa18dafdd0d9d73bfa5c0d0e4f729"} Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.854819 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d7cc9b9fd-6649m" event={"ID":"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb","Type":"ContainerStarted","Data":"c1106c323e22a889e88443c222c0edd9b21563307d4fb61f1ddec189951e3ec0"} Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.855889 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" event={"ID":"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0","Type":"ContainerStarted","Data":"c46972ac2b44c600de26c27ed3bf90deffad38525ce37e621e86eadfa494b91a"} Jan 29 10:53:58 crc kubenswrapper[4852]: I0129 10:53:58.857207 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" event={"ID":"bdf996a1-0482-4e91-8a82-99a9319a4711","Type":"ContainerStarted","Data":"ec345e3bd1ab3f903ca0ce28315894cb25c037811bb374ff81b45aa3625f6dcb"} Jan 29 10:53:59 crc kubenswrapper[4852]: I0129 10:53:59.865167 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d7cc9b9fd-6649m" event={"ID":"503f0fc0-1625-4f1a-b4c4-7961c5d8e1cb","Type":"ContainerStarted","Data":"3cf1866bf5fcbf5943a61a74ee0bc29aa481b1482c2a2057c6a1e7415717860d"} Jan 29 10:53:59 crc kubenswrapper[4852]: I0129 10:53:59.890868 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-d7cc9b9fd-6649m" podStartSLOduration=1.890841774 podStartE2EDuration="1.890841774s" podCreationTimestamp="2026-01-29 10:53:58 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:53:59.885952184 +0000 UTC m=+737.103283348" watchObservedRunningTime="2026-01-29 10:53:59.890841774 +0000 UTC m=+737.108172898" Jan 29 10:54:00 crc kubenswrapper[4852]: I0129 10:54:00.017398 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:54:00 crc kubenswrapper[4852]: I0129 10:54:00.017454 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.886660 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" event={"ID":"6ca6b7d4-7a27-4561-886b-045873d5a78a","Type":"ContainerStarted","Data":"eafe2f9e185539ffad3000e4b4169446c9cc6217f6853e22818268ebbe8d5444"} Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.891469 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" event={"ID":"bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0","Type":"ContainerStarted","Data":"9f2a6d0e1a0439d2233539758098ff8eb4e755b30e7d54c2f972eedea790776b"} Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.892171 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.893514 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" event={"ID":"bdf996a1-0482-4e91-8a82-99a9319a4711","Type":"ContainerStarted","Data":"cb0af3ec9e73c991629b8aea6e92be8d31d5b95f499de732ce6bf90836865877"} Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.894650 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bwjjq" event={"ID":"b978ce05-e091-4dc5-814d-47378f31ea22","Type":"ContainerStarted","Data":"6aefb6e65c973055a1877faa7c807ec85efbbd6749dd8b961f89e076013d130f"} Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.895085 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.916561 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wpgfj" podStartSLOduration=2.061328689 podStartE2EDuration="4.916533983s" podCreationTimestamp="2026-01-29 10:53:57 +0000 UTC" firstStartedPulling="2026-01-29 10:53:58.579180448 +0000 UTC m=+735.796511582" lastFinishedPulling="2026-01-29 10:54:01.434385742 +0000 UTC m=+738.651716876" observedRunningTime="2026-01-29 10:54:01.901152545 +0000 UTC m=+739.118483689" watchObservedRunningTime="2026-01-29 10:54:01.916533983 +0000 UTC m=+739.133865117" Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.945008 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" podStartSLOduration=2.002931751 
podStartE2EDuration="4.944986994s" podCreationTimestamp="2026-01-29 10:53:57 +0000 UTC" firstStartedPulling="2026-01-29 10:53:58.522796719 +0000 UTC m=+735.740127853" lastFinishedPulling="2026-01-29 10:54:01.464851962 +0000 UTC m=+738.682183096" observedRunningTime="2026-01-29 10:54:01.934928266 +0000 UTC m=+739.152259400" watchObservedRunningTime="2026-01-29 10:54:01.944986994 +0000 UTC m=+739.162318128" Jan 29 10:54:01 crc kubenswrapper[4852]: I0129 10:54:01.963652 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-bwjjq" podStartSLOduration=1.849611916 podStartE2EDuration="4.963626493s" podCreationTimestamp="2026-01-29 10:53:57 +0000 UTC" firstStartedPulling="2026-01-29 10:53:58.313712331 +0000 UTC m=+735.531043465" lastFinishedPulling="2026-01-29 10:54:01.427726908 +0000 UTC m=+738.645058042" observedRunningTime="2026-01-29 10:54:01.952059908 +0000 UTC m=+739.169391052" watchObservedRunningTime="2026-01-29 10:54:01.963626493 +0000 UTC m=+739.180957647" Jan 29 10:54:03 crc kubenswrapper[4852]: I0129 10:54:03.907313 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" event={"ID":"bdf996a1-0482-4e91-8a82-99a9319a4711","Type":"ContainerStarted","Data":"a3d4425c7dffe7d0d81ed00f915a43692919aff507b29b01eff1c5fd37f5a162"} Jan 29 10:54:03 crc kubenswrapper[4852]: I0129 10:54:03.927980 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-zp86n" podStartSLOduration=1.6812132100000001 podStartE2EDuration="6.927960132s" podCreationTimestamp="2026-01-29 10:53:57 +0000 UTC" firstStartedPulling="2026-01-29 10:53:58.468376229 +0000 UTC m=+735.685707373" lastFinishedPulling="2026-01-29 10:54:03.715123161 +0000 UTC m=+740.932454295" observedRunningTime="2026-01-29 10:54:03.925968552 +0000 UTC m=+741.143299706" watchObservedRunningTime="2026-01-29 10:54:03.927960132 +0000 UTC m=+741.145291256" Jan 29 10:54:08 crc kubenswrapper[4852]: I0129 10:54:08.254199 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-bwjjq" Jan 29 10:54:08 crc kubenswrapper[4852]: I0129 10:54:08.608132 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:54:08 crc kubenswrapper[4852]: I0129 10:54:08.608207 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:54:08 crc kubenswrapper[4852]: I0129 10:54:08.612557 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:54:09 crc kubenswrapper[4852]: I0129 10:54:09.136385 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-d7cc9b9fd-6649m" Jan 29 10:54:09 crc kubenswrapper[4852]: I0129 10:54:09.182925 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-dcnm7"] Jan 29 10:54:16 crc kubenswrapper[4852]: I0129 10:54:16.905957 4852 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 10:54:18 crc kubenswrapper[4852]: I0129 10:54:18.233879 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-ldknj" Jan 29 10:54:30 crc kubenswrapper[4852]: I0129 10:54:30.017009 4852 
patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:54:30 crc kubenswrapper[4852]: I0129 10:54:30.017794 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.284673 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf"] Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.286083 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.292682 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.300610 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf"] Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.357364 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.357421 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.357467 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtt77\" (UniqueName: \"kubernetes.io/projected/ca05a8d8-1629-4d04-9ab9-0da017f65631-kube-api-access-xtt77\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.459041 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.459116 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.459190 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtt77\" (UniqueName: \"kubernetes.io/projected/ca05a8d8-1629-4d04-9ab9-0da017f65631-kube-api-access-xtt77\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.460048 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.460073 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.483814 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtt77\" (UniqueName: \"kubernetes.io/projected/ca05a8d8-1629-4d04-9ab9-0da017f65631-kube-api-access-xtt77\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.610393 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:32 crc kubenswrapper[4852]: I0129 10:54:32.807444 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf"] Jan 29 10:54:33 crc kubenswrapper[4852]: I0129 10:54:33.268072 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerID="1e7da5da4a9cd1bdf642e97038b88800100230bd1f3a567c9a0c9658114e07bb" exitCode=0 Jan 29 10:54:33 crc kubenswrapper[4852]: I0129 10:54:33.268140 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" event={"ID":"ca05a8d8-1629-4d04-9ab9-0da017f65631","Type":"ContainerDied","Data":"1e7da5da4a9cd1bdf642e97038b88800100230bd1f3a567c9a0c9658114e07bb"} Jan 29 10:54:33 crc kubenswrapper[4852]: I0129 10:54:33.268387 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" event={"ID":"ca05a8d8-1629-4d04-9ab9-0da017f65631","Type":"ContainerStarted","Data":"434be2190f50bbd0611d79ce1c6d5c30f1b07cf17c779c0d4982d32c8b2008d5"} Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.245824 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-dcnm7" podUID="bc753007-98cd-4b3b-ab70-6035482f7c5e" containerName="console" containerID="cri-o://8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44" gracePeriod=15 Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.596965 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-dcnm7_bc753007-98cd-4b3b-ab70-6035482f7c5e/console/0.log" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.597217 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691020 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-serving-cert\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691095 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rttwq\" (UniqueName: \"kubernetes.io/projected/bc753007-98cd-4b3b-ab70-6035482f7c5e-kube-api-access-rttwq\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691120 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-config\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691147 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-service-ca\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691165 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-trusted-ca-bundle\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691184 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-oauth-config\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691204 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-oauth-serving-cert\") pod \"bc753007-98cd-4b3b-ab70-6035482f7c5e\" (UID: \"bc753007-98cd-4b3b-ab70-6035482f7c5e\") " Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.691921 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.692013 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-service-ca" (OuterVolumeSpecName: "service-ca") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.692290 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.692431 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-config" (OuterVolumeSpecName: "console-config") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.696971 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.697195 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc753007-98cd-4b3b-ab70-6035482f7c5e-kube-api-access-rttwq" (OuterVolumeSpecName: "kube-api-access-rttwq") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "kube-api-access-rttwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.697406 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "bc753007-98cd-4b3b-ab70-6035482f7c5e" (UID: "bc753007-98cd-4b3b-ab70-6035482f7c5e"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.792943 4852 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.792989 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rttwq\" (UniqueName: \"kubernetes.io/projected/bc753007-98cd-4b3b-ab70-6035482f7c5e-kube-api-access-rttwq\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.793000 4852 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.793017 4852 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.793025 4852 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.793032 4852 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bc753007-98cd-4b3b-ab70-6035482f7c5e-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:34 crc kubenswrapper[4852]: I0129 10:54:34.793043 4852 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bc753007-98cd-4b3b-ab70-6035482f7c5e-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.284148 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerID="be95866b15f68b3e2671dfb97d4aad695e10bcd2f5bef29308a450e937dc256c" exitCode=0 Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.284259 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" event={"ID":"ca05a8d8-1629-4d04-9ab9-0da017f65631","Type":"ContainerDied","Data":"be95866b15f68b3e2671dfb97d4aad695e10bcd2f5bef29308a450e937dc256c"} Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.289249 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-dcnm7_bc753007-98cd-4b3b-ab70-6035482f7c5e/console/0.log" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.289300 4852 generic.go:334] "Generic (PLEG): container finished" podID="bc753007-98cd-4b3b-ab70-6035482f7c5e" containerID="8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44" exitCode=2 Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.289330 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-dcnm7" event={"ID":"bc753007-98cd-4b3b-ab70-6035482f7c5e","Type":"ContainerDied","Data":"8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44"} Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.289355 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-dcnm7" 
event={"ID":"bc753007-98cd-4b3b-ab70-6035482f7c5e","Type":"ContainerDied","Data":"9a721f661ca10efb802fb4dc7e96ad1c66715c47e3d123937aa063edf6bed6f5"} Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.289371 4852 scope.go:117] "RemoveContainer" containerID="8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.289712 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-dcnm7" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.327729 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-dcnm7"] Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.331249 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-dcnm7"] Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.331647 4852 scope.go:117] "RemoveContainer" containerID="8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44" Jan 29 10:54:35 crc kubenswrapper[4852]: E0129 10:54:35.332107 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44\": container with ID starting with 8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44 not found: ID does not exist" containerID="8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.332144 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44"} err="failed to get container status \"8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44\": rpc error: code = NotFound desc = could not find container \"8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44\": container with ID starting with 8e9604c18ede13c485cc5880715d2ea72be4b77f2c004e3485e5f76aa67aac44 not found: ID does not exist" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.470454 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc753007-98cd-4b3b-ab70-6035482f7c5e" path="/var/lib/kubelet/pods/bc753007-98cd-4b3b-ab70-6035482f7c5e/volumes" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.850885 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r2v9q"] Jan 29 10:54:35 crc kubenswrapper[4852]: E0129 10:54:35.851643 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc753007-98cd-4b3b-ab70-6035482f7c5e" containerName="console" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.851674 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc753007-98cd-4b3b-ab70-6035482f7c5e" containerName="console" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.851847 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc753007-98cd-4b3b-ab70-6035482f7c5e" containerName="console" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.854079 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.862276 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r2v9q"] Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.905774 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl2jk\" (UniqueName: \"kubernetes.io/projected/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-kube-api-access-xl2jk\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.906064 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-catalog-content\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:35 crc kubenswrapper[4852]: I0129 10:54:35.906193 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-utilities\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.007806 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl2jk\" (UniqueName: \"kubernetes.io/projected/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-kube-api-access-xl2jk\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.007877 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-catalog-content\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.007933 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-utilities\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.008412 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-utilities\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.008504 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-catalog-content\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.027611 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xl2jk\" (UniqueName: \"kubernetes.io/projected/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-kube-api-access-xl2jk\") pod \"redhat-operators-r2v9q\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.174144 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.298978 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" event={"ID":"ca05a8d8-1629-4d04-9ab9-0da017f65631","Type":"ContainerStarted","Data":"4fe42930bc4f8a1096b5755415b71c0d54dc998dfd6e414f877e285552379794"} Jan 29 10:54:36 crc kubenswrapper[4852]: I0129 10:54:36.379291 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r2v9q"] Jan 29 10:54:37 crc kubenswrapper[4852]: I0129 10:54:37.315660 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerID="4fe42930bc4f8a1096b5755415b71c0d54dc998dfd6e414f877e285552379794" exitCode=0 Jan 29 10:54:37 crc kubenswrapper[4852]: I0129 10:54:37.315724 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" event={"ID":"ca05a8d8-1629-4d04-9ab9-0da017f65631","Type":"ContainerDied","Data":"4fe42930bc4f8a1096b5755415b71c0d54dc998dfd6e414f877e285552379794"} Jan 29 10:54:37 crc kubenswrapper[4852]: I0129 10:54:37.318273 4852 generic.go:334] "Generic (PLEG): container finished" podID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerID="393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764" exitCode=0 Jan 29 10:54:37 crc kubenswrapper[4852]: I0129 10:54:37.318307 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2v9q" event={"ID":"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd","Type":"ContainerDied","Data":"393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764"} Jan 29 10:54:37 crc kubenswrapper[4852]: I0129 10:54:37.318326 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2v9q" event={"ID":"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd","Type":"ContainerStarted","Data":"96d7499a6b9ac0bdf8a74a6c998fc3182a712509e2ef4a5ec7357a7639900b64"} Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.538421 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.641061 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-util\") pod \"ca05a8d8-1629-4d04-9ab9-0da017f65631\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.641177 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-bundle\") pod \"ca05a8d8-1629-4d04-9ab9-0da017f65631\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.641244 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtt77\" (UniqueName: \"kubernetes.io/projected/ca05a8d8-1629-4d04-9ab9-0da017f65631-kube-api-access-xtt77\") pod \"ca05a8d8-1629-4d04-9ab9-0da017f65631\" (UID: \"ca05a8d8-1629-4d04-9ab9-0da017f65631\") " Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.642602 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-bundle" (OuterVolumeSpecName: "bundle") pod "ca05a8d8-1629-4d04-9ab9-0da017f65631" (UID: "ca05a8d8-1629-4d04-9ab9-0da017f65631"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.646282 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca05a8d8-1629-4d04-9ab9-0da017f65631-kube-api-access-xtt77" (OuterVolumeSpecName: "kube-api-access-xtt77") pod "ca05a8d8-1629-4d04-9ab9-0da017f65631" (UID: "ca05a8d8-1629-4d04-9ab9-0da017f65631"). InnerVolumeSpecName "kube-api-access-xtt77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.742442 4852 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.742489 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtt77\" (UniqueName: \"kubernetes.io/projected/ca05a8d8-1629-4d04-9ab9-0da017f65631-kube-api-access-xtt77\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.863501 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-util" (OuterVolumeSpecName: "util") pod "ca05a8d8-1629-4d04-9ab9-0da017f65631" (UID: "ca05a8d8-1629-4d04-9ab9-0da017f65631"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:54:38 crc kubenswrapper[4852]: I0129 10:54:38.945089 4852 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca05a8d8-1629-4d04-9ab9-0da017f65631-util\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:39 crc kubenswrapper[4852]: I0129 10:54:39.337034 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" event={"ID":"ca05a8d8-1629-4d04-9ab9-0da017f65631","Type":"ContainerDied","Data":"434be2190f50bbd0611d79ce1c6d5c30f1b07cf17c779c0d4982d32c8b2008d5"} Jan 29 10:54:39 crc kubenswrapper[4852]: I0129 10:54:39.337088 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="434be2190f50bbd0611d79ce1c6d5c30f1b07cf17c779c0d4982d32c8b2008d5" Jan 29 10:54:39 crc kubenswrapper[4852]: I0129 10:54:39.337173 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf" Jan 29 10:54:39 crc kubenswrapper[4852]: I0129 10:54:39.344155 4852 generic.go:334] "Generic (PLEG): container finished" podID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerID="7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771" exitCode=0 Jan 29 10:54:39 crc kubenswrapper[4852]: I0129 10:54:39.344215 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2v9q" event={"ID":"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd","Type":"ContainerDied","Data":"7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771"} Jan 29 10:54:40 crc kubenswrapper[4852]: I0129 10:54:40.351554 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2v9q" event={"ID":"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd","Type":"ContainerStarted","Data":"62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e"} Jan 29 10:54:40 crc kubenswrapper[4852]: I0129 10:54:40.371848 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r2v9q" podStartSLOduration=2.782741173 podStartE2EDuration="5.371829055s" podCreationTimestamp="2026-01-29 10:54:35 +0000 UTC" firstStartedPulling="2026-01-29 10:54:37.320300946 +0000 UTC m=+774.537632080" lastFinishedPulling="2026-01-29 10:54:39.909388788 +0000 UTC m=+777.126719962" observedRunningTime="2026-01-29 10:54:40.369286403 +0000 UTC m=+777.586617537" watchObservedRunningTime="2026-01-29 10:54:40.371829055 +0000 UTC m=+777.589160189" Jan 29 10:54:46 crc kubenswrapper[4852]: I0129 10:54:46.174753 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:46 crc kubenswrapper[4852]: I0129 10:54:46.175080 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:46 crc kubenswrapper[4852]: I0129 10:54:46.232039 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:46 crc kubenswrapper[4852]: I0129 10:54:46.423793 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:49 crc kubenswrapper[4852]: I0129 10:54:49.840885 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-r2v9q"] Jan 29 10:54:49 crc kubenswrapper[4852]: I0129 10:54:49.841368 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r2v9q" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="registry-server" containerID="cri-o://62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e" gracePeriod=2 Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.183059 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.285706 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-utilities\") pod \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.285750 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-catalog-content\") pod \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.285781 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xl2jk\" (UniqueName: \"kubernetes.io/projected/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-kube-api-access-xl2jk\") pod \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\" (UID: \"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd\") " Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.286628 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-utilities" (OuterVolumeSpecName: "utilities") pod "4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" (UID: "4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.291208 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-kube-api-access-xl2jk" (OuterVolumeSpecName: "kube-api-access-xl2jk") pod "4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" (UID: "4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd"). InnerVolumeSpecName "kube-api-access-xl2jk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.391372 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.392026 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xl2jk\" (UniqueName: \"kubernetes.io/projected/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-kube-api-access-xl2jk\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.419150 4852 generic.go:334] "Generic (PLEG): container finished" podID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerID="62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e" exitCode=0 Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.419191 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2v9q" event={"ID":"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd","Type":"ContainerDied","Data":"62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e"} Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.419217 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2v9q" event={"ID":"4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd","Type":"ContainerDied","Data":"96d7499a6b9ac0bdf8a74a6c998fc3182a712509e2ef4a5ec7357a7639900b64"} Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.419232 4852 scope.go:117] "RemoveContainer" containerID="62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.419372 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2v9q" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.437814 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" (UID: "4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.450876 4852 scope.go:117] "RemoveContainer" containerID="7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.496269 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.500274 4852 scope.go:117] "RemoveContainer" containerID="393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.527743 4852 scope.go:117] "RemoveContainer" containerID="62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.528571 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e\": container with ID starting with 62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e not found: ID does not exist" containerID="62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.528620 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e"} err="failed to get container status \"62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e\": rpc error: code = NotFound desc = could not find container \"62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e\": container with ID starting with 62c8d8320067cc8650957d5b43ab97ca6f2329d818fa03b463ccb808277b087e not found: ID does not exist" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.528639 4852 scope.go:117] "RemoveContainer" containerID="7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.529070 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771\": container with ID starting with 7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771 not found: ID does not exist" containerID="7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.529110 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771"} err="failed to get container status \"7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771\": rpc error: code = NotFound desc = could not find container \"7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771\": container with ID starting with 7a1c686586f892e1337f4a7f157da8690ef67b08a1e2e06e0102fa2b62680771 not found: ID does not exist" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.529138 4852 scope.go:117] "RemoveContainer" containerID="393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.529376 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764\": container with ID starting with 393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764 not found: ID does not exist" containerID="393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.529400 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764"} err="failed to get container status \"393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764\": rpc error: code = NotFound desc = could not find container \"393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764\": container with ID starting with 393955b0497a3432210e9a8eb0015583ae7a491120af5ad55b33fcb6beee4764 not found: ID does not exist" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.619932 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg"] Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.620422 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="registry-server" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.620506 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="registry-server" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.620595 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="extract" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.620693 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="extract" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.620779 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="extract-utilities" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.620853 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="extract-utilities" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.620915 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="util" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.620981 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="util" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.621047 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="extract-content" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.621095 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="extract-content" Jan 29 10:54:50 crc kubenswrapper[4852]: E0129 10:54:50.621171 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="pull" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.621220 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="pull" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.621432 4852 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" containerName="registry-server" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.621518 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca05a8d8-1629-4d04-9ab9-0da017f65631" containerName="extract" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.622211 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.625363 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.625556 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.625700 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.625631 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-nplmh" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.625661 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.633895 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg"] Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.756346 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r2v9q"] Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.758507 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r2v9q"] Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.799685 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-webhook-cert\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.800068 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x55qv\" (UniqueName: \"kubernetes.io/projected/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-kube-api-access-x55qv\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.800253 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-apiservice-cert\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.900927 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-webhook-cert\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.900983 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x55qv\" (UniqueName: \"kubernetes.io/projected/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-kube-api-access-x55qv\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.901025 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-apiservice-cert\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.906697 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-webhook-cert\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.907243 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-apiservice-cert\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.921096 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x55qv\" (UniqueName: \"kubernetes.io/projected/00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9-kube-api-access-x55qv\") pod \"metallb-operator-controller-manager-6445fdf5fb-tfjfg\" (UID: \"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9\") " pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.940459 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.947775 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4"] Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.948993 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.953338 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.953783 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.955606 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-k8jq5" Jan 29 10:54:50 crc kubenswrapper[4852]: I0129 10:54:50.956235 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4"] Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.001728 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2131fae0-adff-4b95-8e86-5915ad516749-apiservice-cert\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.001818 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzlfj\" (UniqueName: \"kubernetes.io/projected/2131fae0-adff-4b95-8e86-5915ad516749-kube-api-access-fzlfj\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.001862 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2131fae0-adff-4b95-8e86-5915ad516749-webhook-cert\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.113379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2131fae0-adff-4b95-8e86-5915ad516749-apiservice-cert\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.113910 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzlfj\" (UniqueName: \"kubernetes.io/projected/2131fae0-adff-4b95-8e86-5915ad516749-kube-api-access-fzlfj\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.113942 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2131fae0-adff-4b95-8e86-5915ad516749-webhook-cert\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 
10:54:51.120222 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2131fae0-adff-4b95-8e86-5915ad516749-apiservice-cert\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.122498 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2131fae0-adff-4b95-8e86-5915ad516749-webhook-cert\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.137281 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzlfj\" (UniqueName: \"kubernetes.io/projected/2131fae0-adff-4b95-8e86-5915ad516749-kube-api-access-fzlfj\") pod \"metallb-operator-webhook-server-785876fb9c-fnwz4\" (UID: \"2131fae0-adff-4b95-8e86-5915ad516749\") " pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.181654 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg"] Jan 29 10:54:51 crc kubenswrapper[4852]: W0129 10:54:51.182743 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00b7e7c3_bd65_4c98_aff5_4fbd340b4ad9.slice/crio-92d3bf4e40a1dc87a53d2baa4eae576f2ddfb98d2e592da81a5eb2624002f9d3 WatchSource:0}: Error finding container 92d3bf4e40a1dc87a53d2baa4eae576f2ddfb98d2e592da81a5eb2624002f9d3: Status 404 returned error can't find the container with id 92d3bf4e40a1dc87a53d2baa4eae576f2ddfb98d2e592da81a5eb2624002f9d3 Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.264092 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.440789 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" event={"ID":"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9","Type":"ContainerStarted","Data":"92d3bf4e40a1dc87a53d2baa4eae576f2ddfb98d2e592da81a5eb2624002f9d3"} Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.471419 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd" path="/var/lib/kubelet/pods/4c3103ef-a7fa-4df6-a2e4-ebba0d8339fd/volumes" Jan 29 10:54:51 crc kubenswrapper[4852]: I0129 10:54:51.558563 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4"] Jan 29 10:54:51 crc kubenswrapper[4852]: W0129 10:54:51.563935 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2131fae0_adff_4b95_8e86_5915ad516749.slice/crio-4a79ae4ef5b782082ab3ed0423efaff0594ba90b78f9fd9d9900939f9d8c074b WatchSource:0}: Error finding container 4a79ae4ef5b782082ab3ed0423efaff0594ba90b78f9fd9d9900939f9d8c074b: Status 404 returned error can't find the container with id 4a79ae4ef5b782082ab3ed0423efaff0594ba90b78f9fd9d9900939f9d8c074b Jan 29 10:54:52 crc kubenswrapper[4852]: I0129 10:54:52.445599 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" event={"ID":"2131fae0-adff-4b95-8e86-5915ad516749","Type":"ContainerStarted","Data":"4a79ae4ef5b782082ab3ed0423efaff0594ba90b78f9fd9d9900939f9d8c074b"} Jan 29 10:54:57 crc kubenswrapper[4852]: I0129 10:54:57.477662 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" event={"ID":"00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9","Type":"ContainerStarted","Data":"8486de530d95ff788e8808da1e8a5dadde4d7fdf6b80156f92687b581fd70367"} Jan 29 10:54:57 crc kubenswrapper[4852]: I0129 10:54:57.478077 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:54:57 crc kubenswrapper[4852]: I0129 10:54:57.479640 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" event={"ID":"2131fae0-adff-4b95-8e86-5915ad516749","Type":"ContainerStarted","Data":"c4347264b81a6ddb4e48d30aeb79500f7c374c78ca0b3568637cc3b41138bc0c"} Jan 29 10:54:57 crc kubenswrapper[4852]: I0129 10:54:57.479774 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:54:57 crc kubenswrapper[4852]: I0129 10:54:57.524651 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" podStartSLOduration=2.983517915 podStartE2EDuration="7.524626762s" podCreationTimestamp="2026-01-29 10:54:50 +0000 UTC" firstStartedPulling="2026-01-29 10:54:51.568921224 +0000 UTC m=+788.786252358" lastFinishedPulling="2026-01-29 10:54:56.110030071 +0000 UTC m=+793.327361205" observedRunningTime="2026-01-29 10:54:57.518603274 +0000 UTC m=+794.735934418" watchObservedRunningTime="2026-01-29 10:54:57.524626762 +0000 UTC m=+794.741957936" Jan 29 10:54:57 crc kubenswrapper[4852]: I0129 
10:54:57.524820 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" podStartSLOduration=2.910735934 podStartE2EDuration="7.524811727s" podCreationTimestamp="2026-01-29 10:54:50 +0000 UTC" firstStartedPulling="2026-01-29 10:54:51.188841795 +0000 UTC m=+788.406172919" lastFinishedPulling="2026-01-29 10:54:55.802917578 +0000 UTC m=+793.020248712" observedRunningTime="2026-01-29 10:54:57.496752486 +0000 UTC m=+794.714083620" watchObservedRunningTime="2026-01-29 10:54:57.524811727 +0000 UTC m=+794.742142901" Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.016755 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.017315 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.017397 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.018284 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"756798b6b62af6c0bb8f39d162b21805a228a82cc20b150cf9cff4c9ad06408c"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.018344 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://756798b6b62af6c0bb8f39d162b21805a228a82cc20b150cf9cff4c9ad06408c" gracePeriod=600 Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.498767 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="756798b6b62af6c0bb8f39d162b21805a228a82cc20b150cf9cff4c9ad06408c" exitCode=0 Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.498838 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"756798b6b62af6c0bb8f39d162b21805a228a82cc20b150cf9cff4c9ad06408c"} Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.499101 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"fb2ed3c4caa5478d63bcb1710ebf19f9b201d62d528f176bfc9d19e4065c39e0"} Jan 29 10:55:00 crc kubenswrapper[4852]: I0129 10:55:00.499124 4852 scope.go:117] "RemoveContainer" containerID="2b7a5150bf5ab624a6ac763355fa0b8d32e9873f889978a385a8f1865aad054e" Jan 29 10:55:11 crc kubenswrapper[4852]: I0129 
10:55:11.267932 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-785876fb9c-fnwz4" Jan 29 10:55:30 crc kubenswrapper[4852]: I0129 10:55:30.944290 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6445fdf5fb-tfjfg" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.604381 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-rgc7d"] Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.607812 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.610070 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.611141 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.611368 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-6l5v8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.612910 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8"] Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.613626 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.618240 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.626093 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8"] Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.639894 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-reloader\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.639960 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-conf\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.639982 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fc7e862-7101-4e92-9429-567296738d25-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-pz8m8\" (UID: \"1fc7e862-7101-4e92-9429-567296738d25\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.640042 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics-certs\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.640059 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-sockets\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.640091 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-startup\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.640191 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjs89\" (UniqueName: \"kubernetes.io/projected/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-kube-api-access-sjs89\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.640271 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.640304 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n6rb\" (UniqueName: \"kubernetes.io/projected/1fc7e862-7101-4e92-9429-567296738d25-kube-api-access-5n6rb\") pod \"frr-k8s-webhook-server-7df86c4f6c-pz8m8\" (UID: \"1fc7e862-7101-4e92-9429-567296738d25\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.707564 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-rtdgk"] Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.708405 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.717147 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-mtg5b"] Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.720441 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.726011 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.726083 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.726016 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.726320 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-4w28k" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.726457 4852 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.734734 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-mtg5b"] Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746639 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-conf\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746682 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vt7c\" (UniqueName: \"kubernetes.io/projected/bd9b41ba-dc2c-415c-b903-b906d8f96078-kube-api-access-8vt7c\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746703 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/bd9b41ba-dc2c-415c-b903-b906d8f96078-metallb-excludel2\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746730 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-cert\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746747 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fc7e862-7101-4e92-9429-567296738d25-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-pz8m8\" (UID: \"1fc7e862-7101-4e92-9429-567296738d25\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746763 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics-certs\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746778 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-ct645\" (UniqueName: \"kubernetes.io/projected/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-kube-api-access-ct645\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746800 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-sockets\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746818 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746839 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-startup\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746859 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjs89\" (UniqueName: \"kubernetes.io/projected/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-kube-api-access-sjs89\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746894 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-metrics-certs\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746915 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746934 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n6rb\" (UniqueName: \"kubernetes.io/projected/1fc7e862-7101-4e92-9429-567296738d25-kube-api-access-5n6rb\") pod \"frr-k8s-webhook-server-7df86c4f6c-pz8m8\" (UID: \"1fc7e862-7101-4e92-9429-567296738d25\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746953 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-metrics-certs\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.746972 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-reloader\") pod 
\"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.747333 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-reloader\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: E0129 10:55:31.747415 4852 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 29 10:55:31 crc kubenswrapper[4852]: E0129 10:55:31.747459 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics-certs podName:fac2b4a6-eb46-4998-bf17-6f7b73b3b43d nodeName:}" failed. No retries permitted until 2026-01-29 10:55:32.24744356 +0000 UTC m=+829.464774694 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics-certs") pod "frr-k8s-rgc7d" (UID: "fac2b4a6-eb46-4998-bf17-6f7b73b3b43d") : secret "frr-k8s-certs-secret" not found Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.747706 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-conf\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.747826 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-sockets\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.749273 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-frr-startup\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.749509 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.753148 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fc7e862-7101-4e92-9429-567296738d25-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-pz8m8\" (UID: \"1fc7e862-7101-4e92-9429-567296738d25\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.783259 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n6rb\" (UniqueName: \"kubernetes.io/projected/1fc7e862-7101-4e92-9429-567296738d25-kube-api-access-5n6rb\") pod \"frr-k8s-webhook-server-7df86c4f6c-pz8m8\" (UID: \"1fc7e862-7101-4e92-9429-567296738d25\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.784011 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sjs89\" (UniqueName: \"kubernetes.io/projected/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-kube-api-access-sjs89\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848467 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-metrics-certs\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848530 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-metrics-certs\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848594 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vt7c\" (UniqueName: \"kubernetes.io/projected/bd9b41ba-dc2c-415c-b903-b906d8f96078-kube-api-access-8vt7c\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848615 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/bd9b41ba-dc2c-415c-b903-b906d8f96078-metallb-excludel2\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848633 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-cert\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848663 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct645\" (UniqueName: \"kubernetes.io/projected/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-kube-api-access-ct645\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.848684 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: E0129 10:55:31.848767 4852 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 10:55:31 crc kubenswrapper[4852]: E0129 10:55:31.848811 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist podName:bd9b41ba-dc2c-415c-b903-b906d8f96078 nodeName:}" failed. No retries permitted until 2026-01-29 10:55:32.348798225 +0000 UTC m=+829.566129349 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist") pod "speaker-rtdgk" (UID: "bd9b41ba-dc2c-415c-b903-b906d8f96078") : secret "metallb-memberlist" not found Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.849501 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/bd9b41ba-dc2c-415c-b903-b906d8f96078-metallb-excludel2\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.852504 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-metrics-certs\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.852651 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-metrics-certs\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.854492 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-cert\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.868080 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vt7c\" (UniqueName: \"kubernetes.io/projected/bd9b41ba-dc2c-415c-b903-b906d8f96078-kube-api-access-8vt7c\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.868193 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct645\" (UniqueName: \"kubernetes.io/projected/bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d-kube-api-access-ct645\") pod \"controller-6968d8fdc4-mtg5b\" (UID: \"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d\") " pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:31 crc kubenswrapper[4852]: I0129 10:55:31.938186 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.041748 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.255143 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics-certs\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.267086 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fac2b4a6-eb46-4998-bf17-6f7b73b3b43d-metrics-certs\") pod \"frr-k8s-rgc7d\" (UID: \"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d\") " pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.292559 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-mtg5b"] Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.323237 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8"] Jan 29 10:55:32 crc kubenswrapper[4852]: W0129 10:55:32.327144 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fc7e862_7101_4e92_9429_567296738d25.slice/crio-c99685ac2c44045a4b5b4ba5a094efb197b9003856efdab395d4ff0bd916156e WatchSource:0}: Error finding container c99685ac2c44045a4b5b4ba5a094efb197b9003856efdab395d4ff0bd916156e: Status 404 returned error can't find the container with id c99685ac2c44045a4b5b4ba5a094efb197b9003856efdab395d4ff0bd916156e Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.356798 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:32 crc kubenswrapper[4852]: E0129 10:55:32.356965 4852 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 10:55:32 crc kubenswrapper[4852]: E0129 10:55:32.357044 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist podName:bd9b41ba-dc2c-415c-b903-b906d8f96078 nodeName:}" failed. No retries permitted until 2026-01-29 10:55:33.35702376 +0000 UTC m=+830.574354894 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist") pod "speaker-rtdgk" (UID: "bd9b41ba-dc2c-415c-b903-b906d8f96078") : secret "metallb-memberlist" not found Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.529403 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.685921 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"3cb0b50fc99a5d1c47dbf242157e1fb89d034344c3261f7e101db9a344dbf677"} Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.687688 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtg5b" event={"ID":"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d","Type":"ContainerStarted","Data":"a81f62acd614908d1842a647a243becb6d2a0ed87eca3863e450ef0671e87924"} Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.687730 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtg5b" event={"ID":"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d","Type":"ContainerStarted","Data":"759f9afad1864fc0bfc729c0a56f08054caf40ab2d6758c86cc6c5749b6e45d7"} Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.687744 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mtg5b" event={"ID":"bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d","Type":"ContainerStarted","Data":"714cc997120d9c6b6c9bcb74ef52bfd44e07ed270326de2d5c4aa3f9a9083337"} Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.687804 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.688623 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" event={"ID":"1fc7e862-7101-4e92-9429-567296738d25","Type":"ContainerStarted","Data":"c99685ac2c44045a4b5b4ba5a094efb197b9003856efdab395d4ff0bd916156e"} Jan 29 10:55:32 crc kubenswrapper[4852]: I0129 10:55:32.704208 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-mtg5b" podStartSLOduration=1.704190948 podStartE2EDuration="1.704190948s" podCreationTimestamp="2026-01-29 10:55:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:55:32.702742782 +0000 UTC m=+829.920073926" watchObservedRunningTime="2026-01-29 10:55:32.704190948 +0000 UTC m=+829.921522082" Jan 29 10:55:33 crc kubenswrapper[4852]: I0129 10:55:33.370816 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:33 crc kubenswrapper[4852]: I0129 10:55:33.375521 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd9b41ba-dc2c-415c-b903-b906d8f96078-memberlist\") pod \"speaker-rtdgk\" (UID: \"bd9b41ba-dc2c-415c-b903-b906d8f96078\") " pod="metallb-system/speaker-rtdgk" Jan 29 10:55:33 crc kubenswrapper[4852]: I0129 10:55:33.528910 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-rtdgk" Jan 29 10:55:33 crc kubenswrapper[4852]: W0129 10:55:33.549395 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd9b41ba_dc2c_415c_b903_b906d8f96078.slice/crio-17b4b524c0546fb86590e6996e66e8edac83f9ad85b80de7dfc698e759e92c7b WatchSource:0}: Error finding container 17b4b524c0546fb86590e6996e66e8edac83f9ad85b80de7dfc698e759e92c7b: Status 404 returned error can't find the container with id 17b4b524c0546fb86590e6996e66e8edac83f9ad85b80de7dfc698e759e92c7b Jan 29 10:55:33 crc kubenswrapper[4852]: I0129 10:55:33.694650 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rtdgk" event={"ID":"bd9b41ba-dc2c-415c-b903-b906d8f96078","Type":"ContainerStarted","Data":"17b4b524c0546fb86590e6996e66e8edac83f9ad85b80de7dfc698e759e92c7b"} Jan 29 10:55:34 crc kubenswrapper[4852]: I0129 10:55:34.705777 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rtdgk" event={"ID":"bd9b41ba-dc2c-415c-b903-b906d8f96078","Type":"ContainerStarted","Data":"d84cbdffcfae7503d8eb2f9a56c260a4793a4e8fb987a245613734deb3d1537a"} Jan 29 10:55:34 crc kubenswrapper[4852]: I0129 10:55:34.706194 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rtdgk" event={"ID":"bd9b41ba-dc2c-415c-b903-b906d8f96078","Type":"ContainerStarted","Data":"f1eeb1122cb2c85fac8bc16c1ed489baa522d5216b356f06c878fc36ba01fe70"} Jan 29 10:55:34 crc kubenswrapper[4852]: I0129 10:55:34.706325 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-rtdgk" Jan 29 10:55:34 crc kubenswrapper[4852]: I0129 10:55:34.729939 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-rtdgk" podStartSLOduration=3.729919198 podStartE2EDuration="3.729919198s" podCreationTimestamp="2026-01-29 10:55:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:55:34.727711934 +0000 UTC m=+831.945043068" watchObservedRunningTime="2026-01-29 10:55:34.729919198 +0000 UTC m=+831.947250332" Jan 29 10:55:40 crc kubenswrapper[4852]: I0129 10:55:40.748122 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" event={"ID":"1fc7e862-7101-4e92-9429-567296738d25","Type":"ContainerStarted","Data":"5d5c8b46cc94a9a9fc85c4b10df9e2a3f2f63c9740791d553a6f9ee4b0992b16"} Jan 29 10:55:40 crc kubenswrapper[4852]: I0129 10:55:40.748489 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:40 crc kubenswrapper[4852]: I0129 10:55:40.750481 4852 generic.go:334] "Generic (PLEG): container finished" podID="fac2b4a6-eb46-4998-bf17-6f7b73b3b43d" containerID="76a60321282bfd9b039ddd1a4c7215d8b70c26a569c6611d4662356609276905" exitCode=0 Jan 29 10:55:40 crc kubenswrapper[4852]: I0129 10:55:40.750528 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerDied","Data":"76a60321282bfd9b039ddd1a4c7215d8b70c26a569c6611d4662356609276905"} Jan 29 10:55:40 crc kubenswrapper[4852]: I0129 10:55:40.800012 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" podStartSLOduration=2.25015244 
podStartE2EDuration="9.799986531s" podCreationTimestamp="2026-01-29 10:55:31 +0000 UTC" firstStartedPulling="2026-01-29 10:55:32.3310775 +0000 UTC m=+829.548408634" lastFinishedPulling="2026-01-29 10:55:39.880911591 +0000 UTC m=+837.098242725" observedRunningTime="2026-01-29 10:55:40.772934756 +0000 UTC m=+837.990265880" watchObservedRunningTime="2026-01-29 10:55:40.799986531 +0000 UTC m=+838.017317675" Jan 29 10:55:41 crc kubenswrapper[4852]: I0129 10:55:41.766294 4852 generic.go:334] "Generic (PLEG): container finished" podID="fac2b4a6-eb46-4998-bf17-6f7b73b3b43d" containerID="8d5c2f0c064d2bfab5652f083e44843c8a9e774c631d5cb28ae320abdff6102e" exitCode=0 Jan 29 10:55:41 crc kubenswrapper[4852]: I0129 10:55:41.766391 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerDied","Data":"8d5c2f0c064d2bfab5652f083e44843c8a9e774c631d5cb28ae320abdff6102e"} Jan 29 10:55:42 crc kubenswrapper[4852]: I0129 10:55:42.046983 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-mtg5b" Jan 29 10:55:42 crc kubenswrapper[4852]: I0129 10:55:42.773270 4852 generic.go:334] "Generic (PLEG): container finished" podID="fac2b4a6-eb46-4998-bf17-6f7b73b3b43d" containerID="edece9699ed312c293a352f91422929482b0908232f0e94d89f7d040471918bd" exitCode=0 Jan 29 10:55:42 crc kubenswrapper[4852]: I0129 10:55:42.773322 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerDied","Data":"edece9699ed312c293a352f91422929482b0908232f0e94d89f7d040471918bd"} Jan 29 10:55:43 crc kubenswrapper[4852]: I0129 10:55:43.532635 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-rtdgk" Jan 29 10:55:43 crc kubenswrapper[4852]: I0129 10:55:43.783653 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"1945ff192e29801bfd69904d49b6d2d35e9ce1d59715d72111e5ce5b09b8eb8f"} Jan 29 10:55:43 crc kubenswrapper[4852]: I0129 10:55:43.784117 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"05021aa18e014f1668e12ffb37c5f4533abc528f5eec82afe0c57a118f06daad"} Jan 29 10:55:43 crc kubenswrapper[4852]: I0129 10:55:43.784135 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"aed02829c9ef82585574c6682f3d9bbad637710f19d41e4ad11d77077aa7b54e"} Jan 29 10:55:43 crc kubenswrapper[4852]: I0129 10:55:43.784144 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"6f17d063969f7bc4fbaa7e2731d87e819bab6bba3dfc2c7c55ef9bc27aba62f8"} Jan 29 10:55:43 crc kubenswrapper[4852]: I0129 10:55:43.784152 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"a5e1b782f75564afed55a15bb47c051d98f3df5b7a7df51fc918443438a86b12"} Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.794961 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-rgc7d" event={"ID":"fac2b4a6-eb46-4998-bf17-6f7b73b3b43d","Type":"ContainerStarted","Data":"89ee1dbbfcc322f97320c444d92cdc05421f105c287f54d7df88c24caaae8aa0"} Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.795335 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.831283 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-rgc7d" podStartSLOduration=6.562623059 podStartE2EDuration="13.831256864s" podCreationTimestamp="2026-01-29 10:55:31 +0000 UTC" firstStartedPulling="2026-01-29 10:55:32.631024387 +0000 UTC m=+829.848355521" lastFinishedPulling="2026-01-29 10:55:39.899658182 +0000 UTC m=+837.116989326" observedRunningTime="2026-01-29 10:55:44.82621439 +0000 UTC m=+842.043545534" watchObservedRunningTime="2026-01-29 10:55:44.831256864 +0000 UTC m=+842.048587998" Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.885032 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8"] Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.886531 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.888880 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 10:55:44 crc kubenswrapper[4852]: I0129 10:55:44.893092 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8"] Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.046191 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.046557 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.046731 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59t7f\" (UniqueName: \"kubernetes.io/projected/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-kube-api-access-59t7f\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.148711 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-bundle\") pod 
\"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.148814 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.148873 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59t7f\" (UniqueName: \"kubernetes.io/projected/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-kube-api-access-59t7f\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.149320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.150290 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.171565 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59t7f\" (UniqueName: \"kubernetes.io/projected/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-kube-api-access-59t7f\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.200020 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.428148 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8"] Jan 29 10:55:45 crc kubenswrapper[4852]: W0129 10:55:45.436195 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1402be79_7e7e_42ee_8c65_8b8cfa48fd1a.slice/crio-e61574e561fd52e752fe15b4f6fe47b0cd88f1ec66e6adb1e4c30b056bc2c4ca WatchSource:0}: Error finding container e61574e561fd52e752fe15b4f6fe47b0cd88f1ec66e6adb1e4c30b056bc2c4ca: Status 404 returned error can't find the container with id e61574e561fd52e752fe15b4f6fe47b0cd88f1ec66e6adb1e4c30b056bc2c4ca Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.801746 4852 generic.go:334] "Generic (PLEG): container finished" podID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerID="985939f6c299ee1498d505bde8af264cbb2bdc911c74f0c18c24143ee46fbb0f" exitCode=0 Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.802437 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" event={"ID":"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a","Type":"ContainerDied","Data":"985939f6c299ee1498d505bde8af264cbb2bdc911c74f0c18c24143ee46fbb0f"} Jan 29 10:55:45 crc kubenswrapper[4852]: I0129 10:55:45.802493 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" event={"ID":"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a","Type":"ContainerStarted","Data":"e61574e561fd52e752fe15b4f6fe47b0cd88f1ec66e6adb1e4c30b056bc2c4ca"} Jan 29 10:55:47 crc kubenswrapper[4852]: I0129 10:55:47.530105 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:47 crc kubenswrapper[4852]: I0129 10:55:47.569811 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:49 crc kubenswrapper[4852]: I0129 10:55:49.850829 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" event={"ID":"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a","Type":"ContainerStarted","Data":"d2f46e9d4f071484e11e4c43989157c07fd633317ea2c3c1e3d71df476534568"} Jan 29 10:55:50 crc kubenswrapper[4852]: I0129 10:55:50.858651 4852 generic.go:334] "Generic (PLEG): container finished" podID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerID="d2f46e9d4f071484e11e4c43989157c07fd633317ea2c3c1e3d71df476534568" exitCode=0 Jan 29 10:55:50 crc kubenswrapper[4852]: I0129 10:55:50.858770 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" event={"ID":"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a","Type":"ContainerDied","Data":"d2f46e9d4f071484e11e4c43989157c07fd633317ea2c3c1e3d71df476534568"} Jan 29 10:55:51 crc kubenswrapper[4852]: I0129 10:55:51.868923 4852 generic.go:334] "Generic (PLEG): container finished" podID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerID="5d8bdebc32d944aadd4390e734844455aa766740ebdfffd320280d1aa435c77c" exitCode=0 Jan 29 10:55:51 crc kubenswrapper[4852]: I0129 10:55:51.868973 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" event={"ID":"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a","Type":"ContainerDied","Data":"5d8bdebc32d944aadd4390e734844455aa766740ebdfffd320280d1aa435c77c"} Jan 29 10:55:51 crc kubenswrapper[4852]: I0129 10:55:51.950121 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-pz8m8" Jan 29 10:55:52 crc kubenswrapper[4852]: I0129 10:55:52.532972 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-rgc7d" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.141894 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.292755 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59t7f\" (UniqueName: \"kubernetes.io/projected/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-kube-api-access-59t7f\") pod \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.292807 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-bundle\") pod \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.292906 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-util\") pod \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\" (UID: \"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a\") " Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.293679 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-bundle" (OuterVolumeSpecName: "bundle") pod "1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" (UID: "1402be79-7e7e-42ee-8c65-8b8cfa48fd1a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.307748 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-util" (OuterVolumeSpecName: "util") pod "1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" (UID: "1402be79-7e7e-42ee-8c65-8b8cfa48fd1a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.315897 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-kube-api-access-59t7f" (OuterVolumeSpecName: "kube-api-access-59t7f") pod "1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" (UID: "1402be79-7e7e-42ee-8c65-8b8cfa48fd1a"). InnerVolumeSpecName "kube-api-access-59t7f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.393818 4852 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-util\") on node \"crc\" DevicePath \"\"" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.393845 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59t7f\" (UniqueName: \"kubernetes.io/projected/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-kube-api-access-59t7f\") on node \"crc\" DevicePath \"\"" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.393855 4852 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1402be79-7e7e-42ee-8c65-8b8cfa48fd1a-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.883747 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" event={"ID":"1402be79-7e7e-42ee-8c65-8b8cfa48fd1a","Type":"ContainerDied","Data":"e61574e561fd52e752fe15b4f6fe47b0cd88f1ec66e6adb1e4c30b056bc2c4ca"} Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.884052 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e61574e561fd52e752fe15b4f6fe47b0cd88f1ec66e6adb1e4c30b056bc2c4ca" Jan 29 10:55:53 crc kubenswrapper[4852]: I0129 10:55:53.883831 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.610119 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc"] Jan 29 10:55:57 crc kubenswrapper[4852]: E0129 10:55:57.610711 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="pull" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.610726 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="pull" Jan 29 10:55:57 crc kubenswrapper[4852]: E0129 10:55:57.610743 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="extract" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.610751 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="extract" Jan 29 10:55:57 crc kubenswrapper[4852]: E0129 10:55:57.610773 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="util" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.610781 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="util" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.610916 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1402be79-7e7e-42ee-8c65-8b8cfa48fd1a" containerName="extract" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.611414 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.620156 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.620550 4852 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-tvkq8" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.620919 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.636818 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc"] Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.748328 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wllh\" (UniqueName: \"kubernetes.io/projected/ed8caa17-6d9c-40cf-9b6b-1fad852d3673-kube-api-access-6wllh\") pod \"cert-manager-operator-controller-manager-66c8bdd694-4v7nc\" (UID: \"ed8caa17-6d9c-40cf-9b6b-1fad852d3673\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.748656 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ed8caa17-6d9c-40cf-9b6b-1fad852d3673-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-4v7nc\" (UID: \"ed8caa17-6d9c-40cf-9b6b-1fad852d3673\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.849444 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ed8caa17-6d9c-40cf-9b6b-1fad852d3673-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-4v7nc\" (UID: \"ed8caa17-6d9c-40cf-9b6b-1fad852d3673\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.850113 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ed8caa17-6d9c-40cf-9b6b-1fad852d3673-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-4v7nc\" (UID: \"ed8caa17-6d9c-40cf-9b6b-1fad852d3673\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.850301 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wllh\" (UniqueName: \"kubernetes.io/projected/ed8caa17-6d9c-40cf-9b6b-1fad852d3673-kube-api-access-6wllh\") pod \"cert-manager-operator-controller-manager-66c8bdd694-4v7nc\" (UID: \"ed8caa17-6d9c-40cf-9b6b-1fad852d3673\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.869532 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wllh\" (UniqueName: \"kubernetes.io/projected/ed8caa17-6d9c-40cf-9b6b-1fad852d3673-kube-api-access-6wllh\") pod \"cert-manager-operator-controller-manager-66c8bdd694-4v7nc\" (UID: \"ed8caa17-6d9c-40cf-9b6b-1fad852d3673\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:57 crc kubenswrapper[4852]: I0129 10:55:57.941731 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" Jan 29 10:55:58 crc kubenswrapper[4852]: I0129 10:55:58.356182 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc"] Jan 29 10:55:58 crc kubenswrapper[4852]: W0129 10:55:58.359848 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded8caa17_6d9c_40cf_9b6b_1fad852d3673.slice/crio-f2c155980909ebfd576c7f8898a639624feb6b89999a40120330d6d1d019e050 WatchSource:0}: Error finding container f2c155980909ebfd576c7f8898a639624feb6b89999a40120330d6d1d019e050: Status 404 returned error can't find the container with id f2c155980909ebfd576c7f8898a639624feb6b89999a40120330d6d1d019e050 Jan 29 10:55:58 crc kubenswrapper[4852]: I0129 10:55:58.921600 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" event={"ID":"ed8caa17-6d9c-40cf-9b6b-1fad852d3673","Type":"ContainerStarted","Data":"f2c155980909ebfd576c7f8898a639624feb6b89999a40120330d6d1d019e050"} Jan 29 10:56:10 crc kubenswrapper[4852]: E0129 10:56:10.829945 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage3813920102/1\": happened during read: context canceled" image="registry.redhat.io/cert-manager/cert-manager-operator-rhel9@sha256:99526f5a179816df1f7f51df0517136b247d815b7bdce0a5d0eb7cdaf4b5ce7a" Jan 29 10:56:10 crc kubenswrapper[4852]: E0129 10:56:10.831532 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cert-manager-operator,Image:registry.redhat.io/cert-manager/cert-manager-operator-rhel9@sha256:99526f5a179816df1f7f51df0517136b247d815b7bdce0a5d0eb7cdaf4b5ce7a,Command:[/usr/bin/cert-manager-operator],Args:[start --v=$(OPERATOR_LOG_LEVEL) --trusted-ca-configmap=$(TRUSTED_CA_CONFIGMAP_NAME) --cloud-credentials-secret=$(CLOUD_CREDENTIALS_SECRET_NAME) 
--unsupported-addon-features=$(UNSUPPORTED_ADDON_FEATURES)],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.annotations['olm.targetNamespaces'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:OPERATOR_NAME,Value:cert-manager-operator,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CERT_MANAGER_WEBHOOK,Value:registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:903ce74138b1ffc735846a7c5fcdf62bbe82ca29568a6b38caec2656f6637671,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CERT_MANAGER_CA_INJECTOR,Value:registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:903ce74138b1ffc735846a7c5fcdf62bbe82ca29568a6b38caec2656f6637671,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CERT_MANAGER_CONTROLLER,Value:registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:903ce74138b1ffc735846a7c5fcdf62bbe82ca29568a6b38caec2656f6637671,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CERT_MANAGER_ACMESOLVER,Value:registry.redhat.io/cert-manager/jetstack-cert-manager-acmesolver-rhel9@sha256:38899dcd99bcd1c8c8d2c67cd19d5b1756434028ed2f1b926a282723bd63183e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CERT_MANAGER_ISTIOCSR,Value:registry.redhat.io/cert-manager/cert-manager-istio-csr-rhel9@sha256:fb89adfcc4bcdaf21bed27dbc90586fb2b32180259b9f9d6fcf84a38e401fe03,ValueFrom:nil,},EnvVar{Name:OPERAND_IMAGE_VERSION,Value:1.18.4,ValueFrom:nil,},EnvVar{Name:ISTIOCSR_OPERAND_IMAGE_VERSION,Value:0.14.2,ValueFrom:nil,},EnvVar{Name:OPERATOR_IMAGE_VERSION,Value:1.18.1,ValueFrom:nil,},EnvVar{Name:OPERATOR_LOG_LEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:TRUSTED_CA_CONFIGMAP_NAME,Value:,ValueFrom:nil,},EnvVar{Name:CLOUD_CREDENTIALS_SECRET_NAME,Value:,ValueFrom:nil,},EnvVar{Name:UNSUPPORTED_ADDON_FEATURES,Value:,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cert-manager-operator.v1.18.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{33554432 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:tmp,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6wllh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:*false,SELinuxOptions:nil,RunAsUser:*1000700000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cert-manager-operator-controller-manager-66c8bdd694-4v7nc_cert-manager-operator(ed8caa17-6d9c-40cf-9b6b-1fad852d3673): 
ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage3813920102/1\": happened during read: context canceled" logger="UnhandledError" Jan 29 10:56:10 crc kubenswrapper[4852]: E0129 10:56:10.832887 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage3813920102/1\\\": happened during read: context canceled\"" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" podUID="ed8caa17-6d9c-40cf-9b6b-1fad852d3673" Jan 29 10:56:11 crc kubenswrapper[4852]: E0129 10:56:11.002161 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cert-manager/cert-manager-operator-rhel9@sha256:99526f5a179816df1f7f51df0517136b247d815b7bdce0a5d0eb7cdaf4b5ce7a\\\"\"" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" podUID="ed8caa17-6d9c-40cf-9b6b-1fad852d3673" Jan 29 10:56:26 crc kubenswrapper[4852]: I0129 10:56:26.092862 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" event={"ID":"ed8caa17-6d9c-40cf-9b6b-1fad852d3673","Type":"ContainerStarted","Data":"7cfdbb1464330219edd73a06cf61378ee112b274d93f871197d97197f4197211"} Jan 29 10:56:26 crc kubenswrapper[4852]: I0129 10:56:26.117856 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-4v7nc" podStartSLOduration=1.8563874070000002 podStartE2EDuration="29.117839763s" podCreationTimestamp="2026-01-29 10:55:57 +0000 UTC" firstStartedPulling="2026-01-29 10:55:58.362380473 +0000 UTC m=+855.579711607" lastFinishedPulling="2026-01-29 10:56:25.623832829 +0000 UTC m=+882.841163963" observedRunningTime="2026-01-29 10:56:26.112739517 +0000 UTC m=+883.330070651" watchObservedRunningTime="2026-01-29 10:56:26.117839763 +0000 UTC m=+883.335170897" Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.886839 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-w72np"] Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.887786 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.889809 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.890354 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.890410 4852 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-lljgj" Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.900038 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-w72np"] Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.983483 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8379fbbf-af14-42ac-af6e-6d08d56fd6c6-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-w72np\" (UID: \"8379fbbf-af14-42ac-af6e-6d08d56fd6c6\") " pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:28 crc kubenswrapper[4852]: I0129 10:56:28.983638 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cjrd\" (UniqueName: \"kubernetes.io/projected/8379fbbf-af14-42ac-af6e-6d08d56fd6c6-kube-api-access-4cjrd\") pod \"cert-manager-webhook-6888856db4-w72np\" (UID: \"8379fbbf-af14-42ac-af6e-6d08d56fd6c6\") " pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:29 crc kubenswrapper[4852]: I0129 10:56:29.084472 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cjrd\" (UniqueName: \"kubernetes.io/projected/8379fbbf-af14-42ac-af6e-6d08d56fd6c6-kube-api-access-4cjrd\") pod \"cert-manager-webhook-6888856db4-w72np\" (UID: \"8379fbbf-af14-42ac-af6e-6d08d56fd6c6\") " pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:29 crc kubenswrapper[4852]: I0129 10:56:29.084530 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8379fbbf-af14-42ac-af6e-6d08d56fd6c6-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-w72np\" (UID: \"8379fbbf-af14-42ac-af6e-6d08d56fd6c6\") " pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:29 crc kubenswrapper[4852]: I0129 10:56:29.103350 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8379fbbf-af14-42ac-af6e-6d08d56fd6c6-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-w72np\" (UID: \"8379fbbf-af14-42ac-af6e-6d08d56fd6c6\") " pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:29 crc kubenswrapper[4852]: I0129 10:56:29.104352 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cjrd\" (UniqueName: \"kubernetes.io/projected/8379fbbf-af14-42ac-af6e-6d08d56fd6c6-kube-api-access-4cjrd\") pod \"cert-manager-webhook-6888856db4-w72np\" (UID: \"8379fbbf-af14-42ac-af6e-6d08d56fd6c6\") " pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:29 crc kubenswrapper[4852]: I0129 10:56:29.201792 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:29 crc kubenswrapper[4852]: I0129 10:56:29.397290 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-w72np"] Jan 29 10:56:29 crc kubenswrapper[4852]: W0129 10:56:29.407661 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8379fbbf_af14_42ac_af6e_6d08d56fd6c6.slice/crio-679dd9c7b3ec589d0c14b9af86abaebe7398e90c513c1eb915102a29c2382185 WatchSource:0}: Error finding container 679dd9c7b3ec589d0c14b9af86abaebe7398e90c513c1eb915102a29c2382185: Status 404 returned error can't find the container with id 679dd9c7b3ec589d0c14b9af86abaebe7398e90c513c1eb915102a29c2382185 Jan 29 10:56:30 crc kubenswrapper[4852]: I0129 10:56:30.119559 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" event={"ID":"8379fbbf-af14-42ac-af6e-6d08d56fd6c6","Type":"ContainerStarted","Data":"679dd9c7b3ec589d0c14b9af86abaebe7398e90c513c1eb915102a29c2382185"} Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.406185 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-9cvtz"] Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.407016 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.408910 4852 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-fxs55" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.414607 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-9cvtz"] Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.443973 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98frl\" (UniqueName: \"kubernetes.io/projected/b5435b15-3424-42c4-9d63-d3f37d3226f4-kube-api-access-98frl\") pod \"cert-manager-cainjector-5545bd876-9cvtz\" (UID: \"b5435b15-3424-42c4-9d63-d3f37d3226f4\") " pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.444033 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b5435b15-3424-42c4-9d63-d3f37d3226f4-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-9cvtz\" (UID: \"b5435b15-3424-42c4-9d63-d3f37d3226f4\") " pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.545264 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98frl\" (UniqueName: \"kubernetes.io/projected/b5435b15-3424-42c4-9d63-d3f37d3226f4-kube-api-access-98frl\") pod \"cert-manager-cainjector-5545bd876-9cvtz\" (UID: \"b5435b15-3424-42c4-9d63-d3f37d3226f4\") " pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.546180 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b5435b15-3424-42c4-9d63-d3f37d3226f4-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-9cvtz\" (UID: \"b5435b15-3424-42c4-9d63-d3f37d3226f4\") " 
pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.566799 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98frl\" (UniqueName: \"kubernetes.io/projected/b5435b15-3424-42c4-9d63-d3f37d3226f4-kube-api-access-98frl\") pod \"cert-manager-cainjector-5545bd876-9cvtz\" (UID: \"b5435b15-3424-42c4-9d63-d3f37d3226f4\") " pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.567459 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b5435b15-3424-42c4-9d63-d3f37d3226f4-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-9cvtz\" (UID: \"b5435b15-3424-42c4-9d63-d3f37d3226f4\") " pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:31 crc kubenswrapper[4852]: I0129 10:56:31.760986 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" Jan 29 10:56:32 crc kubenswrapper[4852]: I0129 10:56:32.177180 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-9cvtz"] Jan 29 10:56:33 crc kubenswrapper[4852]: I0129 10:56:33.179649 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" event={"ID":"b5435b15-3424-42c4-9d63-d3f37d3226f4","Type":"ContainerStarted","Data":"8d1915ebc572aaca1663fbad7a87266bf26ff2f243c6cd6889b0a9215b318a98"} Jan 29 10:56:35 crc kubenswrapper[4852]: I0129 10:56:35.192535 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" event={"ID":"8379fbbf-af14-42ac-af6e-6d08d56fd6c6","Type":"ContainerStarted","Data":"82fb649b6989e9b504ae3a0c1064b5e32a4e1b5f35e0e1170852137e5c74760b"} Jan 29 10:56:35 crc kubenswrapper[4852]: I0129 10:56:35.193012 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:35 crc kubenswrapper[4852]: I0129 10:56:35.194014 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" event={"ID":"b5435b15-3424-42c4-9d63-d3f37d3226f4","Type":"ContainerStarted","Data":"92429e9f7a0300c91c9e3884f58538d945afd7969d1d2e46e9dd28105c9d3de6"} Jan 29 10:56:35 crc kubenswrapper[4852]: I0129 10:56:35.213018 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" podStartSLOduration=2.414423039 podStartE2EDuration="7.213001575s" podCreationTimestamp="2026-01-29 10:56:28 +0000 UTC" firstStartedPulling="2026-01-29 10:56:29.410228652 +0000 UTC m=+886.627559786" lastFinishedPulling="2026-01-29 10:56:34.208807178 +0000 UTC m=+891.426138322" observedRunningTime="2026-01-29 10:56:35.210266087 +0000 UTC m=+892.427597221" watchObservedRunningTime="2026-01-29 10:56:35.213001575 +0000 UTC m=+892.430332709" Jan 29 10:56:35 crc kubenswrapper[4852]: I0129 10:56:35.230530 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-5545bd876-9cvtz" podStartSLOduration=2.197911196 podStartE2EDuration="4.230511926s" podCreationTimestamp="2026-01-29 10:56:31 +0000 UTC" firstStartedPulling="2026-01-29 10:56:32.177334256 +0000 UTC m=+889.394665390" lastFinishedPulling="2026-01-29 10:56:34.209934986 +0000 UTC m=+891.427266120" 
observedRunningTime="2026-01-29 10:56:35.228226659 +0000 UTC m=+892.445557793" watchObservedRunningTime="2026-01-29 10:56:35.230511926 +0000 UTC m=+892.447843060" Jan 29 10:56:39 crc kubenswrapper[4852]: I0129 10:56:39.204388 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-6888856db4-w72np" Jan 29 10:56:47 crc kubenswrapper[4852]: I0129 10:56:47.786971 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-545d4d4674-wr726"] Jan 29 10:56:47 crc kubenswrapper[4852]: I0129 10:56:47.789109 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:47 crc kubenswrapper[4852]: I0129 10:56:47.792333 4852 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-q4dht" Jan 29 10:56:47 crc kubenswrapper[4852]: I0129 10:56:47.815879 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-wr726"] Jan 29 10:56:47 crc kubenswrapper[4852]: I0129 10:56:47.980014 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0-bound-sa-token\") pod \"cert-manager-545d4d4674-wr726\" (UID: \"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0\") " pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:47 crc kubenswrapper[4852]: I0129 10:56:47.980089 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-946xd\" (UniqueName: \"kubernetes.io/projected/edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0-kube-api-access-946xd\") pod \"cert-manager-545d4d4674-wr726\" (UID: \"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0\") " pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:48 crc kubenswrapper[4852]: I0129 10:56:48.081715 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0-bound-sa-token\") pod \"cert-manager-545d4d4674-wr726\" (UID: \"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0\") " pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:48 crc kubenswrapper[4852]: I0129 10:56:48.081778 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-946xd\" (UniqueName: \"kubernetes.io/projected/edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0-kube-api-access-946xd\") pod \"cert-manager-545d4d4674-wr726\" (UID: \"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0\") " pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:48 crc kubenswrapper[4852]: I0129 10:56:48.102032 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0-bound-sa-token\") pod \"cert-manager-545d4d4674-wr726\" (UID: \"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0\") " pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:48 crc kubenswrapper[4852]: I0129 10:56:48.103758 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-946xd\" (UniqueName: \"kubernetes.io/projected/edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0-kube-api-access-946xd\") pod \"cert-manager-545d4d4674-wr726\" (UID: \"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0\") " pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:48 crc kubenswrapper[4852]: I0129 10:56:48.128094 4852 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-wr726" Jan 29 10:56:48 crc kubenswrapper[4852]: I0129 10:56:48.547311 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-wr726"] Jan 29 10:56:48 crc kubenswrapper[4852]: W0129 10:56:48.555110 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedbcdb0c_875a_4c47_b2d0_d98d5d9b13b0.slice/crio-0fc1aff060d55305e748f88e5af7c084eb9b53706f535e5d74a9db7e7a1f823c WatchSource:0}: Error finding container 0fc1aff060d55305e748f88e5af7c084eb9b53706f535e5d74a9db7e7a1f823c: Status 404 returned error can't find the container with id 0fc1aff060d55305e748f88e5af7c084eb9b53706f535e5d74a9db7e7a1f823c Jan 29 10:56:49 crc kubenswrapper[4852]: I0129 10:56:49.296045 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-wr726" event={"ID":"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0","Type":"ContainerStarted","Data":"5f73579b1309f992b637e65c02955d1aaec9a6f16ec0bc176148df35714a03a1"} Jan 29 10:56:49 crc kubenswrapper[4852]: I0129 10:56:49.296364 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-wr726" event={"ID":"edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0","Type":"ContainerStarted","Data":"0fc1aff060d55305e748f88e5af7c084eb9b53706f535e5d74a9db7e7a1f823c"} Jan 29 10:56:49 crc kubenswrapper[4852]: I0129 10:56:49.323311 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-545d4d4674-wr726" podStartSLOduration=2.323290734 podStartE2EDuration="2.323290734s" podCreationTimestamp="2026-01-29 10:56:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:56:49.317525242 +0000 UTC m=+906.534856386" watchObservedRunningTime="2026-01-29 10:56:49.323290734 +0000 UTC m=+906.540621868" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.384636 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6mqq6"] Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.386212 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.390256 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.390601 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-9fwp5" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.390632 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.397865 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6mqq6"] Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.439870 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlwgv\" (UniqueName: \"kubernetes.io/projected/b450a96f-ee13-40e8-bcff-83fda92ca0f8-kube-api-access-xlwgv\") pod \"openstack-operator-index-6mqq6\" (UID: \"b450a96f-ee13-40e8-bcff-83fda92ca0f8\") " pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.542225 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlwgv\" (UniqueName: \"kubernetes.io/projected/b450a96f-ee13-40e8-bcff-83fda92ca0f8-kube-api-access-xlwgv\") pod \"openstack-operator-index-6mqq6\" (UID: \"b450a96f-ee13-40e8-bcff-83fda92ca0f8\") " pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.564460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlwgv\" (UniqueName: \"kubernetes.io/projected/b450a96f-ee13-40e8-bcff-83fda92ca0f8-kube-api-access-xlwgv\") pod \"openstack-operator-index-6mqq6\" (UID: \"b450a96f-ee13-40e8-bcff-83fda92ca0f8\") " pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:52 crc kubenswrapper[4852]: I0129 10:56:52.715332 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:53 crc kubenswrapper[4852]: W0129 10:56:53.132159 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb450a96f_ee13_40e8_bcff_83fda92ca0f8.slice/crio-747e292b896bd52558b16e2890174c98f7654714fef8a831d6d70219faa110bb WatchSource:0}: Error finding container 747e292b896bd52558b16e2890174c98f7654714fef8a831d6d70219faa110bb: Status 404 returned error can't find the container with id 747e292b896bd52558b16e2890174c98f7654714fef8a831d6d70219faa110bb Jan 29 10:56:53 crc kubenswrapper[4852]: I0129 10:56:53.132339 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6mqq6"] Jan 29 10:56:53 crc kubenswrapper[4852]: I0129 10:56:53.323429 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6mqq6" event={"ID":"b450a96f-ee13-40e8-bcff-83fda92ca0f8","Type":"ContainerStarted","Data":"747e292b896bd52558b16e2890174c98f7654714fef8a831d6d70219faa110bb"} Jan 29 10:56:53 crc kubenswrapper[4852]: I0129 10:56:53.950383 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-6mqq6"] Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.357194 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-ll8cd"] Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.359492 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.367766 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-ll8cd"] Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.372500 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bjs9\" (UniqueName: \"kubernetes.io/projected/e03a92f3-62d5-4d3c-ae9d-dc09f11644b3-kube-api-access-5bjs9\") pod \"openstack-operator-index-ll8cd\" (UID: \"e03a92f3-62d5-4d3c-ae9d-dc09f11644b3\") " pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.473639 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bjs9\" (UniqueName: \"kubernetes.io/projected/e03a92f3-62d5-4d3c-ae9d-dc09f11644b3-kube-api-access-5bjs9\") pod \"openstack-operator-index-ll8cd\" (UID: \"e03a92f3-62d5-4d3c-ae9d-dc09f11644b3\") " pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.492180 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bjs9\" (UniqueName: \"kubernetes.io/projected/e03a92f3-62d5-4d3c-ae9d-dc09f11644b3-kube-api-access-5bjs9\") pod \"openstack-operator-index-ll8cd\" (UID: \"e03a92f3-62d5-4d3c-ae9d-dc09f11644b3\") " pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:56:54 crc kubenswrapper[4852]: I0129 10:56:54.685081 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.000272 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-ll8cd"] Jan 29 10:56:56 crc kubenswrapper[4852]: W0129 10:56:56.008634 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode03a92f3_62d5_4d3c_ae9d_dc09f11644b3.slice/crio-b7b528f69c547cdf76cd9c08d4aef2c5080e8a66ee4cdcc17b6043560439d018 WatchSource:0}: Error finding container b7b528f69c547cdf76cd9c08d4aef2c5080e8a66ee4cdcc17b6043560439d018: Status 404 returned error can't find the container with id b7b528f69c547cdf76cd9c08d4aef2c5080e8a66ee4cdcc17b6043560439d018 Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.348762 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-ll8cd" event={"ID":"e03a92f3-62d5-4d3c-ae9d-dc09f11644b3","Type":"ContainerStarted","Data":"22db297388fd0d38897e32bd1a1011a65e26505b1af48d8b89b7243c2407d14b"} Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.348829 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-ll8cd" event={"ID":"e03a92f3-62d5-4d3c-ae9d-dc09f11644b3","Type":"ContainerStarted","Data":"b7b528f69c547cdf76cd9c08d4aef2c5080e8a66ee4cdcc17b6043560439d018"} Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.356526 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6mqq6" event={"ID":"b450a96f-ee13-40e8-bcff-83fda92ca0f8","Type":"ContainerStarted","Data":"150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37"} Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.356643 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-6mqq6" podUID="b450a96f-ee13-40e8-bcff-83fda92ca0f8" containerName="registry-server" containerID="cri-o://150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37" gracePeriod=2 Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.372398 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-ll8cd" podStartSLOduration=2.329285335 podStartE2EDuration="2.372369576s" podCreationTimestamp="2026-01-29 10:56:54 +0000 UTC" firstStartedPulling="2026-01-29 10:56:56.012954896 +0000 UTC m=+913.230286030" lastFinishedPulling="2026-01-29 10:56:56.056039137 +0000 UTC m=+913.273370271" observedRunningTime="2026-01-29 10:56:56.36482671 +0000 UTC m=+913.582157844" watchObservedRunningTime="2026-01-29 10:56:56.372369576 +0000 UTC m=+913.589700720" Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.380956 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6mqq6" podStartSLOduration=1.839722954 podStartE2EDuration="4.380938267s" podCreationTimestamp="2026-01-29 10:56:52 +0000 UTC" firstStartedPulling="2026-01-29 10:56:53.13490097 +0000 UTC m=+910.352232094" lastFinishedPulling="2026-01-29 10:56:55.676116273 +0000 UTC m=+912.893447407" observedRunningTime="2026-01-29 10:56:56.377809381 +0000 UTC m=+913.595140515" watchObservedRunningTime="2026-01-29 10:56:56.380938267 +0000 UTC m=+913.598269401" Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.790129 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.903125 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlwgv\" (UniqueName: \"kubernetes.io/projected/b450a96f-ee13-40e8-bcff-83fda92ca0f8-kube-api-access-xlwgv\") pod \"b450a96f-ee13-40e8-bcff-83fda92ca0f8\" (UID: \"b450a96f-ee13-40e8-bcff-83fda92ca0f8\") " Jan 29 10:56:56 crc kubenswrapper[4852]: I0129 10:56:56.910762 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b450a96f-ee13-40e8-bcff-83fda92ca0f8-kube-api-access-xlwgv" (OuterVolumeSpecName: "kube-api-access-xlwgv") pod "b450a96f-ee13-40e8-bcff-83fda92ca0f8" (UID: "b450a96f-ee13-40e8-bcff-83fda92ca0f8"). InnerVolumeSpecName "kube-api-access-xlwgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.004415 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlwgv\" (UniqueName: \"kubernetes.io/projected/b450a96f-ee13-40e8-bcff-83fda92ca0f8-kube-api-access-xlwgv\") on node \"crc\" DevicePath \"\"" Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.364974 4852 generic.go:334] "Generic (PLEG): container finished" podID="b450a96f-ee13-40e8-bcff-83fda92ca0f8" containerID="150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37" exitCode=0 Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.365049 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6mqq6" event={"ID":"b450a96f-ee13-40e8-bcff-83fda92ca0f8","Type":"ContainerDied","Data":"150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37"} Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.365099 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6mqq6" event={"ID":"b450a96f-ee13-40e8-bcff-83fda92ca0f8","Type":"ContainerDied","Data":"747e292b896bd52558b16e2890174c98f7654714fef8a831d6d70219faa110bb"} Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.365121 4852 scope.go:117] "RemoveContainer" containerID="150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37" Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.365124 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6mqq6" Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.386187 4852 scope.go:117] "RemoveContainer" containerID="150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37" Jan 29 10:56:57 crc kubenswrapper[4852]: E0129 10:56:57.386666 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37\": container with ID starting with 150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37 not found: ID does not exist" containerID="150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37" Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.386715 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37"} err="failed to get container status \"150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37\": rpc error: code = NotFound desc = could not find container \"150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37\": container with ID starting with 150223c435c3b7b4f0f1f64f8b3c838c9c09235ad426a984d666517769af6a37 not found: ID does not exist" Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.403478 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-6mqq6"] Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.411811 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-6mqq6"] Jan 29 10:56:57 crc kubenswrapper[4852]: I0129 10:56:57.470739 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b450a96f-ee13-40e8-bcff-83fda92ca0f8" path="/var/lib/kubelet/pods/b450a96f-ee13-40e8-bcff-83fda92ca0f8/volumes" Jan 29 10:57:00 crc kubenswrapper[4852]: I0129 10:57:00.017240 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:57:00 crc kubenswrapper[4852]: I0129 10:57:00.017649 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.482781 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dpvhs"] Jan 29 10:57:04 crc kubenswrapper[4852]: E0129 10:57:04.483473 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b450a96f-ee13-40e8-bcff-83fda92ca0f8" containerName="registry-server" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.483494 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b450a96f-ee13-40e8-bcff-83fda92ca0f8" containerName="registry-server" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.483745 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b450a96f-ee13-40e8-bcff-83fda92ca0f8" containerName="registry-server" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.485154 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.488031 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpvhs"] Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.514780 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djxkt\" (UniqueName: \"kubernetes.io/projected/aacafb2d-9e5b-45d8-8c1b-95e344f64810-kube-api-access-djxkt\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.514832 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-utilities\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.514857 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-catalog-content\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.616112 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djxkt\" (UniqueName: \"kubernetes.io/projected/aacafb2d-9e5b-45d8-8c1b-95e344f64810-kube-api-access-djxkt\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.616217 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-utilities\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.616238 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-catalog-content\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.616762 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-catalog-content\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.616857 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-utilities\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.634784 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-djxkt\" (UniqueName: \"kubernetes.io/projected/aacafb2d-9e5b-45d8-8c1b-95e344f64810-kube-api-access-djxkt\") pod \"certified-operators-dpvhs\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.686005 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.686080 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.719319 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:57:04 crc kubenswrapper[4852]: I0129 10:57:04.808851 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:05 crc kubenswrapper[4852]: I0129 10:57:05.251594 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpvhs"] Jan 29 10:57:05 crc kubenswrapper[4852]: I0129 10:57:05.419914 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerStarted","Data":"c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36"} Jan 29 10:57:05 crc kubenswrapper[4852]: I0129 10:57:05.419957 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerStarted","Data":"b9d85a97f7862a2e6193f7e3dc30eac57ff946be899a030b13d700c57a21fd88"} Jan 29 10:57:05 crc kubenswrapper[4852]: I0129 10:57:05.453809 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-ll8cd" Jan 29 10:57:06 crc kubenswrapper[4852]: I0129 10:57:06.431497 4852 generic.go:334] "Generic (PLEG): container finished" podID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerID="c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36" exitCode=0 Jan 29 10:57:06 crc kubenswrapper[4852]: I0129 10:57:06.431559 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerDied","Data":"c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36"} Jan 29 10:57:06 crc kubenswrapper[4852]: I0129 10:57:06.431878 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerStarted","Data":"9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10"} Jan 29 10:57:07 crc kubenswrapper[4852]: I0129 10:57:07.440678 4852 generic.go:334] "Generic (PLEG): container finished" podID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerID="9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10" exitCode=0 Jan 29 10:57:07 crc kubenswrapper[4852]: I0129 10:57:07.440718 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" 
event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerDied","Data":"9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10"} Jan 29 10:57:08 crc kubenswrapper[4852]: I0129 10:57:08.449627 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerStarted","Data":"5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f"} Jan 29 10:57:08 crc kubenswrapper[4852]: I0129 10:57:08.467502 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dpvhs" podStartSLOduration=2.048432049 podStartE2EDuration="4.467486513s" podCreationTimestamp="2026-01-29 10:57:04 +0000 UTC" firstStartedPulling="2026-01-29 10:57:05.422003574 +0000 UTC m=+922.639334708" lastFinishedPulling="2026-01-29 10:57:07.841058018 +0000 UTC m=+925.058389172" observedRunningTime="2026-01-29 10:57:08.465820802 +0000 UTC m=+925.683151976" watchObservedRunningTime="2026-01-29 10:57:08.467486513 +0000 UTC m=+925.684817647" Jan 29 10:57:14 crc kubenswrapper[4852]: I0129 10:57:14.809014 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:14 crc kubenswrapper[4852]: I0129 10:57:14.810175 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:14 crc kubenswrapper[4852]: I0129 10:57:14.867515 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:15 crc kubenswrapper[4852]: I0129 10:57:15.557877 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:15 crc kubenswrapper[4852]: I0129 10:57:15.611073 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpvhs"] Jan 29 10:57:17 crc kubenswrapper[4852]: I0129 10:57:17.510125 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dpvhs" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="registry-server" containerID="cri-o://5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f" gracePeriod=2 Jan 29 10:57:17 crc kubenswrapper[4852]: I0129 10:57:17.915261 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.001548 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-utilities\") pod \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.001860 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-catalog-content\") pod \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.001962 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djxkt\" (UniqueName: \"kubernetes.io/projected/aacafb2d-9e5b-45d8-8c1b-95e344f64810-kube-api-access-djxkt\") pod \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\" (UID: \"aacafb2d-9e5b-45d8-8c1b-95e344f64810\") " Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.003279 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-utilities" (OuterVolumeSpecName: "utilities") pod "aacafb2d-9e5b-45d8-8c1b-95e344f64810" (UID: "aacafb2d-9e5b-45d8-8c1b-95e344f64810"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.008750 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aacafb2d-9e5b-45d8-8c1b-95e344f64810-kube-api-access-djxkt" (OuterVolumeSpecName: "kube-api-access-djxkt") pod "aacafb2d-9e5b-45d8-8c1b-95e344f64810" (UID: "aacafb2d-9e5b-45d8-8c1b-95e344f64810"). InnerVolumeSpecName "kube-api-access-djxkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.043299 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aacafb2d-9e5b-45d8-8c1b-95e344f64810" (UID: "aacafb2d-9e5b-45d8-8c1b-95e344f64810"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.103724 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.103769 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacafb2d-9e5b-45d8-8c1b-95e344f64810-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.103783 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djxkt\" (UniqueName: \"kubernetes.io/projected/aacafb2d-9e5b-45d8-8c1b-95e344f64810-kube-api-access-djxkt\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.535403 4852 generic.go:334] "Generic (PLEG): container finished" podID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerID="5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f" exitCode=0 Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.535466 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerDied","Data":"5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f"} Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.535521 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpvhs" event={"ID":"aacafb2d-9e5b-45d8-8c1b-95e344f64810","Type":"ContainerDied","Data":"b9d85a97f7862a2e6193f7e3dc30eac57ff946be899a030b13d700c57a21fd88"} Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.535547 4852 scope.go:117] "RemoveContainer" containerID="5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.535789 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpvhs" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.565790 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq"] Jan 29 10:57:18 crc kubenswrapper[4852]: E0129 10:57:18.566367 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="extract-utilities" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.566475 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="extract-utilities" Jan 29 10:57:18 crc kubenswrapper[4852]: E0129 10:57:18.566614 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="registry-server" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.566721 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="registry-server" Jan 29 10:57:18 crc kubenswrapper[4852]: E0129 10:57:18.566817 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="extract-content" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.566905 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="extract-content" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.567186 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" containerName="registry-server" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.569303 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.574712 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-q92mr" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.581204 4852 scope.go:117] "RemoveContainer" containerID="9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.587752 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq"] Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.605302 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpvhs"] Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.610350 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-bundle\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.610564 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rjg4\" (UniqueName: \"kubernetes.io/projected/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-kube-api-access-2rjg4\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.610677 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dpvhs"] Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.610681 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-util\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.623575 4852 scope.go:117] "RemoveContainer" containerID="c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.641572 4852 scope.go:117] "RemoveContainer" containerID="5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f" Jan 29 10:57:18 crc kubenswrapper[4852]: E0129 10:57:18.642035 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f\": container with ID starting with 5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f not found: ID does not exist" containerID="5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.642067 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f"} err="failed to get container 
status \"5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f\": rpc error: code = NotFound desc = could not find container \"5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f\": container with ID starting with 5268ed1232bd0cbf5a45307df0c8a14c7d681342192345ff1ca74da7bb02379f not found: ID does not exist" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.642089 4852 scope.go:117] "RemoveContainer" containerID="9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10" Jan 29 10:57:18 crc kubenswrapper[4852]: E0129 10:57:18.642385 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10\": container with ID starting with 9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10 not found: ID does not exist" containerID="9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.642480 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10"} err="failed to get container status \"9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10\": rpc error: code = NotFound desc = could not find container \"9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10\": container with ID starting with 9e0dab32a6728e748197b557b40bd74760dd37534e16b0bb56967ae31af5fd10 not found: ID does not exist" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.642667 4852 scope.go:117] "RemoveContainer" containerID="c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36" Jan 29 10:57:18 crc kubenswrapper[4852]: E0129 10:57:18.643036 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36\": container with ID starting with c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36 not found: ID does not exist" containerID="c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.643087 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36"} err="failed to get container status \"c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36\": rpc error: code = NotFound desc = could not find container \"c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36\": container with ID starting with c02daa2285cbb0b12f0b7f9da905fd12f6b49816001646601e58bfdfaec8bf36 not found: ID does not exist" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.711853 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-util\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.711965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-bundle\") pod 
\"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.712010 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rjg4\" (UniqueName: \"kubernetes.io/projected/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-kube-api-access-2rjg4\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.712560 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-bundle\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.712677 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-util\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.729684 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rjg4\" (UniqueName: \"kubernetes.io/projected/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-kube-api-access-2rjg4\") pod \"39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:18 crc kubenswrapper[4852]: I0129 10:57:18.894080 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:19 crc kubenswrapper[4852]: I0129 10:57:19.343388 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq"] Jan 29 10:57:19 crc kubenswrapper[4852]: I0129 10:57:19.471249 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aacafb2d-9e5b-45d8-8c1b-95e344f64810" path="/var/lib/kubelet/pods/aacafb2d-9e5b-45d8-8c1b-95e344f64810/volumes" Jan 29 10:57:19 crc kubenswrapper[4852]: I0129 10:57:19.544613 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" event={"ID":"08fd0cfa-ee12-49db-b31c-3bef3d85ee32","Type":"ContainerStarted","Data":"63db2535ed56badbe558bbc4c33275835617a2c36f446669a3a568c4ccb81642"} Jan 29 10:57:19 crc kubenswrapper[4852]: I0129 10:57:19.544961 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" event={"ID":"08fd0cfa-ee12-49db-b31c-3bef3d85ee32","Type":"ContainerStarted","Data":"6e4a90421a0dd69629b9cfa77c796bb8a5faeb07e47572b77912cad4e1bc1a0f"} Jan 29 10:57:20 crc kubenswrapper[4852]: I0129 10:57:20.552290 4852 generic.go:334] "Generic (PLEG): container finished" podID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerID="63db2535ed56badbe558bbc4c33275835617a2c36f446669a3a568c4ccb81642" exitCode=0 Jan 29 10:57:20 crc kubenswrapper[4852]: I0129 10:57:20.552355 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" event={"ID":"08fd0cfa-ee12-49db-b31c-3bef3d85ee32","Type":"ContainerDied","Data":"63db2535ed56badbe558bbc4c33275835617a2c36f446669a3a568c4ccb81642"} Jan 29 10:57:21 crc kubenswrapper[4852]: I0129 10:57:21.559864 4852 generic.go:334] "Generic (PLEG): container finished" podID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerID="ca1713e581126fd81f2902ab2fea8eea087d2b3589e330532ab2601a025f6bbb" exitCode=0 Jan 29 10:57:21 crc kubenswrapper[4852]: I0129 10:57:21.560002 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" event={"ID":"08fd0cfa-ee12-49db-b31c-3bef3d85ee32","Type":"ContainerDied","Data":"ca1713e581126fd81f2902ab2fea8eea087d2b3589e330532ab2601a025f6bbb"} Jan 29 10:57:22 crc kubenswrapper[4852]: I0129 10:57:22.567702 4852 generic.go:334] "Generic (PLEG): container finished" podID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerID="a94ebce9b4e9567ab807171c5dc93cda13433bbc05a8f3cf417be973716bb484" exitCode=0 Jan 29 10:57:22 crc kubenswrapper[4852]: I0129 10:57:22.567892 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" event={"ID":"08fd0cfa-ee12-49db-b31c-3bef3d85ee32","Type":"ContainerDied","Data":"a94ebce9b4e9567ab807171c5dc93cda13433bbc05a8f3cf417be973716bb484"} Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.851215 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.884473 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-util\") pod \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.884517 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rjg4\" (UniqueName: \"kubernetes.io/projected/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-kube-api-access-2rjg4\") pod \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.884696 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-bundle\") pod \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\" (UID: \"08fd0cfa-ee12-49db-b31c-3bef3d85ee32\") " Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.885670 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-bundle" (OuterVolumeSpecName: "bundle") pod "08fd0cfa-ee12-49db-b31c-3bef3d85ee32" (UID: "08fd0cfa-ee12-49db-b31c-3bef3d85ee32"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.890019 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-kube-api-access-2rjg4" (OuterVolumeSpecName: "kube-api-access-2rjg4") pod "08fd0cfa-ee12-49db-b31c-3bef3d85ee32" (UID: "08fd0cfa-ee12-49db-b31c-3bef3d85ee32"). InnerVolumeSpecName "kube-api-access-2rjg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.909941 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-util" (OuterVolumeSpecName: "util") pod "08fd0cfa-ee12-49db-b31c-3bef3d85ee32" (UID: "08fd0cfa-ee12-49db-b31c-3bef3d85ee32"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.986296 4852 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.986339 4852 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-util\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:23 crc kubenswrapper[4852]: I0129 10:57:23.986351 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rjg4\" (UniqueName: \"kubernetes.io/projected/08fd0cfa-ee12-49db-b31c-3bef3d85ee32-kube-api-access-2rjg4\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:24 crc kubenswrapper[4852]: I0129 10:57:24.585426 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" event={"ID":"08fd0cfa-ee12-49db-b31c-3bef3d85ee32","Type":"ContainerDied","Data":"6e4a90421a0dd69629b9cfa77c796bb8a5faeb07e47572b77912cad4e1bc1a0f"} Jan 29 10:57:24 crc kubenswrapper[4852]: I0129 10:57:24.585473 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq" Jan 29 10:57:24 crc kubenswrapper[4852]: I0129 10:57:24.585475 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e4a90421a0dd69629b9cfa77c796bb8a5faeb07e47572b77912cad4e1bc1a0f" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.163218 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv"] Jan 29 10:57:29 crc kubenswrapper[4852]: E0129 10:57:29.163976 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="extract" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.163990 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="extract" Jan 29 10:57:29 crc kubenswrapper[4852]: E0129 10:57:29.164006 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="util" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.164013 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="util" Jan 29 10:57:29 crc kubenswrapper[4852]: E0129 10:57:29.164029 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="pull" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.164034 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="pull" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.164156 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="08fd0cfa-ee12-49db-b31c-3bef3d85ee32" containerName="extract" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.164606 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.167288 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-pnb7t" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.239462 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv"] Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.249631 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57qtn\" (UniqueName: \"kubernetes.io/projected/69f26544-96bf-4082-af1f-1210db1f56b2-kube-api-access-57qtn\") pod \"openstack-operator-controller-init-78c665cf5d-n85lv\" (UID: \"69f26544-96bf-4082-af1f-1210db1f56b2\") " pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.350595 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57qtn\" (UniqueName: \"kubernetes.io/projected/69f26544-96bf-4082-af1f-1210db1f56b2-kube-api-access-57qtn\") pod \"openstack-operator-controller-init-78c665cf5d-n85lv\" (UID: \"69f26544-96bf-4082-af1f-1210db1f56b2\") " pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.370495 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57qtn\" (UniqueName: \"kubernetes.io/projected/69f26544-96bf-4082-af1f-1210db1f56b2-kube-api-access-57qtn\") pod \"openstack-operator-controller-init-78c665cf5d-n85lv\" (UID: \"69f26544-96bf-4082-af1f-1210db1f56b2\") " pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.481813 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:29 crc kubenswrapper[4852]: I0129 10:57:29.974186 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv"] Jan 29 10:57:29 crc kubenswrapper[4852]: W0129 10:57:29.980115 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69f26544_96bf_4082_af1f_1210db1f56b2.slice/crio-a51835a0f43e9de036713ff0a6b3aba06a44c359e6a344f0e8e31b886f49f240 WatchSource:0}: Error finding container a51835a0f43e9de036713ff0a6b3aba06a44c359e6a344f0e8e31b886f49f240: Status 404 returned error can't find the container with id a51835a0f43e9de036713ff0a6b3aba06a44c359e6a344f0e8e31b886f49f240 Jan 29 10:57:30 crc kubenswrapper[4852]: I0129 10:57:30.016707 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:57:30 crc kubenswrapper[4852]: I0129 10:57:30.016784 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:57:30 crc kubenswrapper[4852]: I0129 10:57:30.632037 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" event={"ID":"69f26544-96bf-4082-af1f-1210db1f56b2","Type":"ContainerStarted","Data":"a51835a0f43e9de036713ff0a6b3aba06a44c359e6a344f0e8e31b886f49f240"} Jan 29 10:57:33 crc kubenswrapper[4852]: I0129 10:57:33.714356 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8mp"] Jan 29 10:57:33 crc kubenswrapper[4852]: I0129 10:57:33.716268 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:33 crc kubenswrapper[4852]: I0129 10:57:33.720985 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8mp"] Jan 29 10:57:33 crc kubenswrapper[4852]: I0129 10:57:33.901086 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-catalog-content\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:33 crc kubenswrapper[4852]: I0129 10:57:33.901445 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-utilities\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:33 crc kubenswrapper[4852]: I0129 10:57:33.901477 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrkr8\" (UniqueName: \"kubernetes.io/projected/922b8e2d-24f3-4e97-8d56-679b745099af-kube-api-access-qrkr8\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.002972 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-catalog-content\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.003062 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-utilities\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.003137 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrkr8\" (UniqueName: \"kubernetes.io/projected/922b8e2d-24f3-4e97-8d56-679b745099af-kube-api-access-qrkr8\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.003456 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-catalog-content\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.003514 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-utilities\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.022605 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qrkr8\" (UniqueName: \"kubernetes.io/projected/922b8e2d-24f3-4e97-8d56-679b745099af-kube-api-access-qrkr8\") pod \"redhat-marketplace-kw8mp\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.055887 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.476122 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8mp"] Jan 29 10:57:34 crc kubenswrapper[4852]: W0129 10:57:34.478103 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod922b8e2d_24f3_4e97_8d56_679b745099af.slice/crio-444c72ac47ab64fd4f7f4096b4793ddc2b16fa3223b4582dde4618a8f854c5bc WatchSource:0}: Error finding container 444c72ac47ab64fd4f7f4096b4793ddc2b16fa3223b4582dde4618a8f854c5bc: Status 404 returned error can't find the container with id 444c72ac47ab64fd4f7f4096b4793ddc2b16fa3223b4582dde4618a8f854c5bc Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.672736 4852 generic.go:334] "Generic (PLEG): container finished" podID="922b8e2d-24f3-4e97-8d56-679b745099af" containerID="f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b" exitCode=0 Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.672797 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8mp" event={"ID":"922b8e2d-24f3-4e97-8d56-679b745099af","Type":"ContainerDied","Data":"f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b"} Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.673131 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8mp" event={"ID":"922b8e2d-24f3-4e97-8d56-679b745099af","Type":"ContainerStarted","Data":"444c72ac47ab64fd4f7f4096b4793ddc2b16fa3223b4582dde4618a8f854c5bc"} Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.678765 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" event={"ID":"69f26544-96bf-4082-af1f-1210db1f56b2","Type":"ContainerStarted","Data":"63a1bd049022558f5ec9524fa8080ef9677f12babab0e914d22dd54285e457df"} Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.678975 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:34 crc kubenswrapper[4852]: I0129 10:57:34.746060 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" podStartSLOduration=1.9178910340000002 podStartE2EDuration="5.746034306s" podCreationTimestamp="2026-01-29 10:57:29 +0000 UTC" firstStartedPulling="2026-01-29 10:57:29.981855746 +0000 UTC m=+947.199186880" lastFinishedPulling="2026-01-29 10:57:33.809999018 +0000 UTC m=+951.027330152" observedRunningTime="2026-01-29 10:57:34.736194654 +0000 UTC m=+951.953525808" watchObservedRunningTime="2026-01-29 10:57:34.746034306 +0000 UTC m=+951.963365440" Jan 29 10:57:35 crc kubenswrapper[4852]: I0129 10:57:35.685870 4852 generic.go:334] "Generic (PLEG): container finished" podID="922b8e2d-24f3-4e97-8d56-679b745099af" containerID="167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b" 
exitCode=0 Jan 29 10:57:35 crc kubenswrapper[4852]: I0129 10:57:35.685943 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8mp" event={"ID":"922b8e2d-24f3-4e97-8d56-679b745099af","Type":"ContainerDied","Data":"167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b"} Jan 29 10:57:36 crc kubenswrapper[4852]: I0129 10:57:36.693072 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8mp" event={"ID":"922b8e2d-24f3-4e97-8d56-679b745099af","Type":"ContainerStarted","Data":"2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1"} Jan 29 10:57:36 crc kubenswrapper[4852]: I0129 10:57:36.710719 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kw8mp" podStartSLOduration=2.212849241 podStartE2EDuration="3.710700342s" podCreationTimestamp="2026-01-29 10:57:33 +0000 UTC" firstStartedPulling="2026-01-29 10:57:34.674252249 +0000 UTC m=+951.891583383" lastFinishedPulling="2026-01-29 10:57:36.17210334 +0000 UTC m=+953.389434484" observedRunningTime="2026-01-29 10:57:36.706603911 +0000 UTC m=+953.923935065" watchObservedRunningTime="2026-01-29 10:57:36.710700342 +0000 UTC m=+953.928031476" Jan 29 10:57:39 crc kubenswrapper[4852]: I0129 10:57:39.485373 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-78c665cf5d-n85lv" Jan 29 10:57:44 crc kubenswrapper[4852]: I0129 10:57:44.056370 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:44 crc kubenswrapper[4852]: I0129 10:57:44.057478 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:44 crc kubenswrapper[4852]: I0129 10:57:44.103428 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:44 crc kubenswrapper[4852]: I0129 10:57:44.777779 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:44 crc kubenswrapper[4852]: I0129 10:57:44.818725 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8mp"] Jan 29 10:57:46 crc kubenswrapper[4852]: I0129 10:57:46.751190 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kw8mp" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="registry-server" containerID="cri-o://2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1" gracePeriod=2 Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.138642 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.281066 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrkr8\" (UniqueName: \"kubernetes.io/projected/922b8e2d-24f3-4e97-8d56-679b745099af-kube-api-access-qrkr8\") pod \"922b8e2d-24f3-4e97-8d56-679b745099af\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.281196 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-catalog-content\") pod \"922b8e2d-24f3-4e97-8d56-679b745099af\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.281236 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-utilities\") pod \"922b8e2d-24f3-4e97-8d56-679b745099af\" (UID: \"922b8e2d-24f3-4e97-8d56-679b745099af\") " Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.282699 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-utilities" (OuterVolumeSpecName: "utilities") pod "922b8e2d-24f3-4e97-8d56-679b745099af" (UID: "922b8e2d-24f3-4e97-8d56-679b745099af"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.287359 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/922b8e2d-24f3-4e97-8d56-679b745099af-kube-api-access-qrkr8" (OuterVolumeSpecName: "kube-api-access-qrkr8") pod "922b8e2d-24f3-4e97-8d56-679b745099af" (UID: "922b8e2d-24f3-4e97-8d56-679b745099af"). InnerVolumeSpecName "kube-api-access-qrkr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.314082 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "922b8e2d-24f3-4e97-8d56-679b745099af" (UID: "922b8e2d-24f3-4e97-8d56-679b745099af"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.382446 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.382483 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922b8e2d-24f3-4e97-8d56-679b745099af-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.382495 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrkr8\" (UniqueName: \"kubernetes.io/projected/922b8e2d-24f3-4e97-8d56-679b745099af-kube-api-access-qrkr8\") on node \"crc\" DevicePath \"\"" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.759989 4852 generic.go:334] "Generic (PLEG): container finished" podID="922b8e2d-24f3-4e97-8d56-679b745099af" containerID="2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1" exitCode=0 Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.760039 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8mp" event={"ID":"922b8e2d-24f3-4e97-8d56-679b745099af","Type":"ContainerDied","Data":"2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1"} Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.760084 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kw8mp" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.760099 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kw8mp" event={"ID":"922b8e2d-24f3-4e97-8d56-679b745099af","Type":"ContainerDied","Data":"444c72ac47ab64fd4f7f4096b4793ddc2b16fa3223b4582dde4618a8f854c5bc"} Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.760126 4852 scope.go:117] "RemoveContainer" containerID="2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.780066 4852 scope.go:117] "RemoveContainer" containerID="167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.784189 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8mp"] Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.791015 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kw8mp"] Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.803730 4852 scope.go:117] "RemoveContainer" containerID="f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.818366 4852 scope.go:117] "RemoveContainer" containerID="2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1" Jan 29 10:57:47 crc kubenswrapper[4852]: E0129 10:57:47.818987 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1\": container with ID starting with 2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1 not found: ID does not exist" containerID="2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.819037 4852 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1"} err="failed to get container status \"2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1\": rpc error: code = NotFound desc = could not find container \"2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1\": container with ID starting with 2b91f8afc2110dc9c800d6aa98adcfe72dda0b9466f29cb3107706f7ba0bc9d1 not found: ID does not exist" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.819069 4852 scope.go:117] "RemoveContainer" containerID="167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b" Jan 29 10:57:47 crc kubenswrapper[4852]: E0129 10:57:47.819646 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b\": container with ID starting with 167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b not found: ID does not exist" containerID="167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.819679 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b"} err="failed to get container status \"167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b\": rpc error: code = NotFound desc = could not find container \"167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b\": container with ID starting with 167d66bff7242b65387c3369532339259ffec1f782db23f55dd65cc568e0038b not found: ID does not exist" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.819701 4852 scope.go:117] "RemoveContainer" containerID="f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b" Jan 29 10:57:47 crc kubenswrapper[4852]: E0129 10:57:47.820020 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b\": container with ID starting with f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b not found: ID does not exist" containerID="f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b" Jan 29 10:57:47 crc kubenswrapper[4852]: I0129 10:57:47.820068 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b"} err="failed to get container status \"f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b\": rpc error: code = NotFound desc = could not find container \"f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b\": container with ID starting with f27259e06ab4e1c75f7b1c423507cc0142ef525b89d7f971b254cdf4cc5aa93b not found: ID does not exist" Jan 29 10:57:49 crc kubenswrapper[4852]: I0129 10:57:49.471971 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" path="/var/lib/kubelet/pods/922b8e2d-24f3-4e97-8d56-679b745099af/volumes" Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.016805 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.018341 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.018466 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.019167 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fb2ed3c4caa5478d63bcb1710ebf19f9b201d62d528f176bfc9d19e4065c39e0"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.019316 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://fb2ed3c4caa5478d63bcb1710ebf19f9b201d62d528f176bfc9d19e4065c39e0" gracePeriod=600 Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.868990 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="fb2ed3c4caa5478d63bcb1710ebf19f9b201d62d528f176bfc9d19e4065c39e0" exitCode=0 Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.869617 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"fb2ed3c4caa5478d63bcb1710ebf19f9b201d62d528f176bfc9d19e4065c39e0"} Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.869645 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"d25938c544c94cb7ff57505e6e76ac88750fccb2f6818b7dc821d1e097f62ced"} Jan 29 10:58:00 crc kubenswrapper[4852]: I0129 10:58:00.869661 4852 scope.go:117] "RemoveContainer" containerID="756798b6b62af6c0bb8f39d162b21805a228a82cc20b150cf9cff4c9ad06408c" Jan 29 10:58:06 crc kubenswrapper[4852]: I0129 10:58:06.348789 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 10:58:06 crc kubenswrapper[4852]: I0129 10:58:06.349417 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 10:58:06 crc kubenswrapper[4852]: I0129 10:58:06.348794 4852 patch_prober.go:28] interesting pod/router-default-5444994796-tg9p8 container/router namespace/openshift-ingress: Liveness probe 
status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 10:58:06 crc kubenswrapper[4852]: I0129 10:58:06.349545 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-tg9p8" podUID="bb8d8805-ce8c-4ce0-b669-c64e7aa85268" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.848836 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7"] Jan 29 10:58:13 crc kubenswrapper[4852]: E0129 10:58:13.849459 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="extract-utilities" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.849470 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="extract-utilities" Jan 29 10:58:13 crc kubenswrapper[4852]: E0129 10:58:13.849478 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="registry-server" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.849484 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="registry-server" Jan 29 10:58:13 crc kubenswrapper[4852]: E0129 10:58:13.849500 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="extract-content" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.849506 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="extract-content" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.849635 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="922b8e2d-24f3-4e97-8d56-679b745099af" containerName="registry-server" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.850029 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.854328 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-64n6r" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.862520 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg"] Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.863552 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.865864 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-zqb8d" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.868717 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7"] Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.880510 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf"] Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.881287 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.883679 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-vdqv5" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.903111 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg"] Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.948869 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf"] Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.978097 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp"] Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.978860 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.980231 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7nq4\" (UniqueName: \"kubernetes.io/projected/0df531fa-b2f4-4122-b8e0-25fd0fb8df7b-kube-api-access-l7nq4\") pod \"cinder-operator-controller-manager-8d874c8fc-pz8mg\" (UID: \"0df531fa-b2f4-4122-b8e0-25fd0fb8df7b\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.980264 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9bl5\" (UniqueName: \"kubernetes.io/projected/0fb78196-264f-44ee-b16b-4a26e4317789-kube-api-access-w9bl5\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-rqdx7\" (UID: \"0fb78196-264f-44ee-b16b-4a26e4317789\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.980299 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qhvj\" (UniqueName: \"kubernetes.io/projected/690fdb1f-20fe-4c9b-9297-7a4e3f623351-kube-api-access-4qhvj\") pod \"designate-operator-controller-manager-6d9697b7f4-jgnnf\" (UID: \"690fdb1f-20fe-4c9b-9297-7a4e3f623351\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:13 crc kubenswrapper[4852]: I0129 10:58:13.982931 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-mj9nm" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.012116 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.022351 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.022446 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.024978 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-8z8mk" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.040298 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.056663 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.057459 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.060280 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-7q7jg" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.065345 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.072915 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.074027 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.075494 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-gznrf" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.076649 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.081992 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7nq4\" (UniqueName: \"kubernetes.io/projected/0df531fa-b2f4-4122-b8e0-25fd0fb8df7b-kube-api-access-l7nq4\") pod \"cinder-operator-controller-manager-8d874c8fc-pz8mg\" (UID: \"0df531fa-b2f4-4122-b8e0-25fd0fb8df7b\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.082028 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9bl5\" (UniqueName: \"kubernetes.io/projected/0fb78196-264f-44ee-b16b-4a26e4317789-kube-api-access-w9bl5\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-rqdx7\" (UID: \"0fb78196-264f-44ee-b16b-4a26e4317789\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.082072 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qhvj\" (UniqueName: \"kubernetes.io/projected/690fdb1f-20fe-4c9b-9297-7a4e3f623351-kube-api-access-4qhvj\") pod \"designate-operator-controller-manager-6d9697b7f4-jgnnf\" (UID: \"690fdb1f-20fe-4c9b-9297-7a4e3f623351\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.082095 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsxhh\" (UniqueName: \"kubernetes.io/projected/a7118d7b-d09a-405d-83f9-558e8d5895e1-kube-api-access-jsxhh\") pod \"glance-operator-controller-manager-8886f4c47-z7mxp\" (UID: \"a7118d7b-d09a-405d-83f9-558e8d5895e1\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.082128 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzmr4\" (UniqueName: \"kubernetes.io/projected/d67255eb-825c-46cc-9deb-8b82ef97a888-kube-api-access-rzmr4\") pod \"heat-operator-controller-manager-69d6db494d-lg5cr\" (UID: 
\"d67255eb-825c-46cc-9deb-8b82ef97a888\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.091380 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.111860 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7nq4\" (UniqueName: \"kubernetes.io/projected/0df531fa-b2f4-4122-b8e0-25fd0fb8df7b-kube-api-access-l7nq4\") pod \"cinder-operator-controller-manager-8d874c8fc-pz8mg\" (UID: \"0df531fa-b2f4-4122-b8e0-25fd0fb8df7b\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.111918 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.112739 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.115527 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rn7h7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.116212 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qhvj\" (UniqueName: \"kubernetes.io/projected/690fdb1f-20fe-4c9b-9297-7a4e3f623351-kube-api-access-4qhvj\") pod \"designate-operator-controller-manager-6d9697b7f4-jgnnf\" (UID: \"690fdb1f-20fe-4c9b-9297-7a4e3f623351\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.135311 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9bl5\" (UniqueName: \"kubernetes.io/projected/0fb78196-264f-44ee-b16b-4a26e4317789-kube-api-access-w9bl5\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-rqdx7\" (UID: \"0fb78196-264f-44ee-b16b-4a26e4317789\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.168356 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.171674 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.177033 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.177903 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.179571 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-ttxr2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.181009 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.183091 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7njr7\" (UniqueName: \"kubernetes.io/projected/1b860bbd-067b-42e5-9c41-78cc915a0a4f-kube-api-access-7njr7\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.183185 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t4wr\" (UniqueName: \"kubernetes.io/projected/01017d98-2eab-44db-8683-8a4ddd8f506f-kube-api-access-9t4wr\") pod \"horizon-operator-controller-manager-5fb775575f-7wjg2\" (UID: \"01017d98-2eab-44db-8683-8a4ddd8f506f\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.183222 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hfjx\" (UniqueName: \"kubernetes.io/projected/53942d52-1e48-4496-bc9b-118126410877-kube-api-access-5hfjx\") pod \"ironic-operator-controller-manager-5f4b8bd54d-db6p9\" (UID: \"53942d52-1e48-4496-bc9b-118126410877\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.183246 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.183274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsxhh\" (UniqueName: \"kubernetes.io/projected/a7118d7b-d09a-405d-83f9-558e8d5895e1-kube-api-access-jsxhh\") pod \"glance-operator-controller-manager-8886f4c47-z7mxp\" (UID: \"a7118d7b-d09a-405d-83f9-558e8d5895e1\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.183305 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzmr4\" (UniqueName: \"kubernetes.io/projected/d67255eb-825c-46cc-9deb-8b82ef97a888-kube-api-access-rzmr4\") pod \"heat-operator-controller-manager-69d6db494d-lg5cr\" (UID: \"d67255eb-825c-46cc-9deb-8b82ef97a888\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.185718 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.193017 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.200130 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.202656 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-96d6g" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.203275 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.215678 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzmr4\" (UniqueName: \"kubernetes.io/projected/d67255eb-825c-46cc-9deb-8b82ef97a888-kube-api-access-rzmr4\") pod \"heat-operator-controller-manager-69d6db494d-lg5cr\" (UID: \"d67255eb-825c-46cc-9deb-8b82ef97a888\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.215737 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.222803 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.223613 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.224450 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsxhh\" (UniqueName: \"kubernetes.io/projected/a7118d7b-d09a-405d-83f9-558e8d5895e1-kube-api-access-jsxhh\") pod \"glance-operator-controller-manager-8886f4c47-z7mxp\" (UID: \"a7118d7b-d09a-405d-83f9-558e8d5895e1\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.228006 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-54wsr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.228464 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.229287 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.232756 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-fj478" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.259004 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.263419 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.270871 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.271685 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.274506 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-t8lw4" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.283640 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.284494 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285053 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285100 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhp85\" (UniqueName: \"kubernetes.io/projected/dab24f31-d338-430f-b891-680412052eb8-kube-api-access-hhp85\") pod \"keystone-operator-controller-manager-84f48565d4-9cqpx\" (UID: \"dab24f31-d338-430f-b891-680412052eb8\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285125 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-826d2\" (UniqueName: \"kubernetes.io/projected/f3f0b5e6-566b-4387-9c99-9b825afc6eec-kube-api-access-826d2\") pod \"manila-operator-controller-manager-7dd968899f-h67ft\" (UID: \"f3f0b5e6-566b-4387-9c99-9b825afc6eec\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285166 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4knk\" (UniqueName: \"kubernetes.io/projected/a046a54a-a5c0-4807-9048-76f8513e916d-kube-api-access-g4knk\") pod \"mariadb-operator-controller-manager-67bf948998-74mf9\" (UID: \"a046a54a-a5c0-4807-9048-76f8513e916d\") " 
pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285198 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7njr7\" (UniqueName: \"kubernetes.io/projected/1b860bbd-067b-42e5-9c41-78cc915a0a4f-kube-api-access-7njr7\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285223 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5vhs\" (UniqueName: \"kubernetes.io/projected/3920c83a-21ab-417c-9d20-fac48cd65803-kube-api-access-z5vhs\") pod \"neutron-operator-controller-manager-585dbc889-vrw88\" (UID: \"3920c83a-21ab-417c-9d20-fac48cd65803\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285253 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t4wr\" (UniqueName: \"kubernetes.io/projected/01017d98-2eab-44db-8683-8a4ddd8f506f-kube-api-access-9t4wr\") pod \"horizon-operator-controller-manager-5fb775575f-7wjg2\" (UID: \"01017d98-2eab-44db-8683-8a4ddd8f506f\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.285279 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hfjx\" (UniqueName: \"kubernetes.io/projected/53942d52-1e48-4496-bc9b-118126410877-kube-api-access-5hfjx\") pod \"ironic-operator-controller-manager-5f4b8bd54d-db6p9\" (UID: \"53942d52-1e48-4496-bc9b-118126410877\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:14 crc kubenswrapper[4852]: E0129 10:58:14.285549 4852 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:14 crc kubenswrapper[4852]: E0129 10:58:14.285611 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert podName:1b860bbd-067b-42e5-9c41-78cc915a0a4f nodeName:}" failed. No retries permitted until 2026-01-29 10:58:14.785581528 +0000 UTC m=+992.002912662 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert") pod "infra-operator-controller-manager-79955696d6-9c8t7" (UID: "1b860bbd-067b-42e5-9c41-78cc915a0a4f") : secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.289858 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-q2mtc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.309711 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.310310 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hfjx\" (UniqueName: \"kubernetes.io/projected/53942d52-1e48-4496-bc9b-118126410877-kube-api-access-5hfjx\") pod \"ironic-operator-controller-manager-5f4b8bd54d-db6p9\" (UID: \"53942d52-1e48-4496-bc9b-118126410877\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.311742 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t4wr\" (UniqueName: \"kubernetes.io/projected/01017d98-2eab-44db-8683-8a4ddd8f506f-kube-api-access-9t4wr\") pod \"horizon-operator-controller-manager-5fb775575f-7wjg2\" (UID: \"01017d98-2eab-44db-8683-8a4ddd8f506f\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.317005 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.318419 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7njr7\" (UniqueName: \"kubernetes.io/projected/1b860bbd-067b-42e5-9c41-78cc915a0a4f-kube-api-access-7njr7\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.329013 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.362294 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.366376 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.368222 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.371896 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-lwqns" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.386757 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.391936 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5vhs\" (UniqueName: \"kubernetes.io/projected/3920c83a-21ab-417c-9d20-fac48cd65803-kube-api-access-z5vhs\") pod \"neutron-operator-controller-manager-585dbc889-vrw88\" (UID: \"3920c83a-21ab-417c-9d20-fac48cd65803\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.392120 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhp85\" (UniqueName: \"kubernetes.io/projected/dab24f31-d338-430f-b891-680412052eb8-kube-api-access-hhp85\") pod \"keystone-operator-controller-manager-84f48565d4-9cqpx\" (UID: \"dab24f31-d338-430f-b891-680412052eb8\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.392149 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-826d2\" (UniqueName: \"kubernetes.io/projected/f3f0b5e6-566b-4387-9c99-9b825afc6eec-kube-api-access-826d2\") pod \"manila-operator-controller-manager-7dd968899f-h67ft\" (UID: \"f3f0b5e6-566b-4387-9c99-9b825afc6eec\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.392180 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpb2q\" (UniqueName: \"kubernetes.io/projected/bbdd4422-19ba-4b56-80d7-eb06aba3bab3-kube-api-access-xpb2q\") pod \"octavia-operator-controller-manager-6687f8d877-bdlbj\" (UID: \"bbdd4422-19ba-4b56-80d7-eb06aba3bab3\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.392211 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mxhg\" (UniqueName: \"kubernetes.io/projected/d32dc864-8a2e-429d-ada3-55137a72ea98-kube-api-access-5mxhg\") pod \"nova-operator-controller-manager-55bff696bd-5rr5x\" (UID: \"d32dc864-8a2e-429d-ada3-55137a72ea98\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.392242 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4knk\" (UniqueName: \"kubernetes.io/projected/a046a54a-a5c0-4807-9048-76f8513e916d-kube-api-access-g4knk\") pod \"mariadb-operator-controller-manager-67bf948998-74mf9\" (UID: \"a046a54a-a5c0-4807-9048-76f8513e916d\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.394017 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.394985 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.398068 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-lb4h4" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.398246 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.399362 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.401474 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.401705 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-c4x5q" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.421820 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5vhs\" (UniqueName: \"kubernetes.io/projected/3920c83a-21ab-417c-9d20-fac48cd65803-kube-api-access-z5vhs\") pod \"neutron-operator-controller-manager-585dbc889-vrw88\" (UID: \"3920c83a-21ab-417c-9d20-fac48cd65803\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.429356 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4knk\" (UniqueName: \"kubernetes.io/projected/a046a54a-a5c0-4807-9048-76f8513e916d-kube-api-access-g4knk\") pod \"mariadb-operator-controller-manager-67bf948998-74mf9\" (UID: \"a046a54a-a5c0-4807-9048-76f8513e916d\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.446665 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.472701 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.495660 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wmxn\" (UniqueName: \"kubernetes.io/projected/37dab4de-5de7-41fc-9e1b-0b586a34f190-kube-api-access-9wmxn\") pod \"ovn-operator-controller-manager-788c46999f-5cp9z\" (UID: \"37dab4de-5de7-41fc-9e1b-0b586a34f190\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.496015 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpb2q\" (UniqueName: \"kubernetes.io/projected/bbdd4422-19ba-4b56-80d7-eb06aba3bab3-kube-api-access-xpb2q\") pod \"octavia-operator-controller-manager-6687f8d877-bdlbj\" (UID: \"bbdd4422-19ba-4b56-80d7-eb06aba3bab3\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.496104 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mxhg\" (UniqueName: \"kubernetes.io/projected/d32dc864-8a2e-429d-ada3-55137a72ea98-kube-api-access-5mxhg\") pod \"nova-operator-controller-manager-55bff696bd-5rr5x\" (UID: \"d32dc864-8a2e-429d-ada3-55137a72ea98\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.496285 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.496311 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khn8k\" (UniqueName: \"kubernetes.io/projected/b6874ffc-31d8-431e-8792-7bcb511ed0fe-kube-api-access-khn8k\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.496412 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9wwp\" (UniqueName: \"kubernetes.io/projected/17656c45-80cf-44cd-92a5-3b4c90e16e02-kube-api-access-v9wwp\") pod \"placement-operator-controller-manager-5b964cf4cd-n75l6\" (UID: \"17656c45-80cf-44cd-92a5-3b4c90e16e02\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.507234 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-826d2\" (UniqueName: \"kubernetes.io/projected/f3f0b5e6-566b-4387-9c99-9b825afc6eec-kube-api-access-826d2\") pod \"manila-operator-controller-manager-7dd968899f-h67ft\" (UID: \"f3f0b5e6-566b-4387-9c99-9b825afc6eec\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.511214 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.527056 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhp85\" (UniqueName: \"kubernetes.io/projected/dab24f31-d338-430f-b891-680412052eb8-kube-api-access-hhp85\") pod \"keystone-operator-controller-manager-84f48565d4-9cqpx\" (UID: \"dab24f31-d338-430f-b891-680412052eb8\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.527944 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.529319 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.529988 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpb2q\" (UniqueName: \"kubernetes.io/projected/bbdd4422-19ba-4b56-80d7-eb06aba3bab3-kube-api-access-xpb2q\") pod \"octavia-operator-controller-manager-6687f8d877-bdlbj\" (UID: \"bbdd4422-19ba-4b56-80d7-eb06aba3bab3\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.532086 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-pplp8" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.547707 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mxhg\" (UniqueName: \"kubernetes.io/projected/d32dc864-8a2e-429d-ada3-55137a72ea98-kube-api-access-5mxhg\") pod \"nova-operator-controller-manager-55bff696bd-5rr5x\" (UID: \"d32dc864-8a2e-429d-ada3-55137a72ea98\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.565351 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.574605 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.583971 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.592145 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.598515 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.598604 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khn8k\" (UniqueName: \"kubernetes.io/projected/b6874ffc-31d8-431e-8792-7bcb511ed0fe-kube-api-access-khn8k\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.598654 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc7h5\" (UniqueName: \"kubernetes.io/projected/55348ba6-6217-4dc4-99c6-9a7521bddb93-kube-api-access-sc7h5\") pod \"swift-operator-controller-manager-68fc8c869-5xcj7\" (UID: \"55348ba6-6217-4dc4-99c6-9a7521bddb93\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.598677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9wwp\" (UniqueName: \"kubernetes.io/projected/17656c45-80cf-44cd-92a5-3b4c90e16e02-kube-api-access-v9wwp\") pod \"placement-operator-controller-manager-5b964cf4cd-n75l6\" (UID: \"17656c45-80cf-44cd-92a5-3b4c90e16e02\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.598728 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wmxn\" (UniqueName: \"kubernetes.io/projected/37dab4de-5de7-41fc-9e1b-0b586a34f190-kube-api-access-9wmxn\") pod \"ovn-operator-controller-manager-788c46999f-5cp9z\" (UID: \"37dab4de-5de7-41fc-9e1b-0b586a34f190\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:14 crc kubenswrapper[4852]: E0129 10:58:14.599305 4852 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:14 crc kubenswrapper[4852]: E0129 10:58:14.599344 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert podName:b6874ffc-31d8-431e-8792-7bcb511ed0fe nodeName:}" failed. No retries permitted until 2026-01-29 10:58:15.099330121 +0000 UTC m=+992.316661255 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" (UID: "b6874ffc-31d8-431e-8792-7bcb511ed0fe") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.609756 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.610881 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.611509 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.614801 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-8jlln" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.618810 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.627940 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khn8k\" (UniqueName: \"kubernetes.io/projected/b6874ffc-31d8-431e-8792-7bcb511ed0fe-kube-api-access-khn8k\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.630204 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.637651 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wmxn\" (UniqueName: \"kubernetes.io/projected/37dab4de-5de7-41fc-9e1b-0b586a34f190-kube-api-access-9wmxn\") pod \"ovn-operator-controller-manager-788c46999f-5cp9z\" (UID: \"37dab4de-5de7-41fc-9e1b-0b586a34f190\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.641231 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.645148 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9wwp\" (UniqueName: \"kubernetes.io/projected/17656c45-80cf-44cd-92a5-3b4c90e16e02-kube-api-access-v9wwp\") pod \"placement-operator-controller-manager-5b964cf4cd-n75l6\" (UID: \"17656c45-80cf-44cd-92a5-3b4c90e16e02\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.645492 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.687546 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.701148 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xtrr\" (UniqueName: \"kubernetes.io/projected/c28733de-04a8-4eca-a143-cc129e122b7b-kube-api-access-2xtrr\") pod \"telemetry-operator-controller-manager-64b5b76f97-h5rtr\" (UID: \"c28733de-04a8-4eca-a143-cc129e122b7b\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.701278 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc7h5\" (UniqueName: \"kubernetes.io/projected/55348ba6-6217-4dc4-99c6-9a7521bddb93-kube-api-access-sc7h5\") pod \"swift-operator-controller-manager-68fc8c869-5xcj7\" (UID: \"55348ba6-6217-4dc4-99c6-9a7521bddb93\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.715668 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.716846 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.722149 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-zz2v8" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.727631 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.729178 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc7h5\" (UniqueName: \"kubernetes.io/projected/55348ba6-6217-4dc4-99c6-9a7521bddb93-kube-api-access-sc7h5\") pod \"swift-operator-controller-manager-68fc8c869-5xcj7\" (UID: \"55348ba6-6217-4dc4-99c6-9a7521bddb93\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.778240 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-js7vz"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.779118 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.781153 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-w8cz4" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.790608 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-js7vz"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.806452 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.806500 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xtrr\" (UniqueName: \"kubernetes.io/projected/c28733de-04a8-4eca-a143-cc129e122b7b-kube-api-access-2xtrr\") pod \"telemetry-operator-controller-manager-64b5b76f97-h5rtr\" (UID: \"c28733de-04a8-4eca-a143-cc129e122b7b\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.806560 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b44rr\" (UniqueName: \"kubernetes.io/projected/1f32b855-3964-4f2b-b958-57789ebc722a-kube-api-access-b44rr\") pod \"test-operator-controller-manager-56f8bfcd9f-fwfc2\" (UID: \"1f32b855-3964-4f2b-b958-57789ebc722a\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:14 crc kubenswrapper[4852]: E0129 10:58:14.806714 4852 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:14 crc kubenswrapper[4852]: E0129 10:58:14.806775 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert podName:1b860bbd-067b-42e5-9c41-78cc915a0a4f nodeName:}" failed. No retries permitted until 2026-01-29 10:58:15.806756427 +0000 UTC m=+993.024087551 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert") pod "infra-operator-controller-manager-79955696d6-9c8t7" (UID: "1b860bbd-067b-42e5-9c41-78cc915a0a4f") : secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.815407 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.816228 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.819023 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.819372 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-qjvrd" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.819510 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.826650 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.830806 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xtrr\" (UniqueName: \"kubernetes.io/projected/c28733de-04a8-4eca-a143-cc129e122b7b-kube-api-access-2xtrr\") pod \"telemetry-operator-controller-manager-64b5b76f97-h5rtr\" (UID: \"c28733de-04a8-4eca-a143-cc129e122b7b\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.869015 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.870335 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.871020 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.874345 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-k8v92" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.880379 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p"] Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.910318 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h66m7\" (UniqueName: \"kubernetes.io/projected/b62831eb-f626-40b6-b332-1fef36357275-kube-api-access-h66m7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-ddd7p\" (UID: \"b62831eb-f626-40b6-b332-1fef36357275\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.910385 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vcjs\" (UniqueName: \"kubernetes.io/projected/fe66ce58-71f6-409f-a714-563da2885d40-kube-api-access-4vcjs\") pod \"watcher-operator-controller-manager-564965969-js7vz\" (UID: \"fe66ce58-71f6-409f-a714-563da2885d40\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.910429 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b44rr\" (UniqueName: \"kubernetes.io/projected/1f32b855-3964-4f2b-b958-57789ebc722a-kube-api-access-b44rr\") pod \"test-operator-controller-manager-56f8bfcd9f-fwfc2\" (UID: \"1f32b855-3964-4f2b-b958-57789ebc722a\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.910482 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.910548 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhxfd\" (UniqueName: \"kubernetes.io/projected/8950e863-95ea-40e3-a812-9df855499f77-kube-api-access-xhxfd\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.910610 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.943363 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b44rr\" (UniqueName: 
\"kubernetes.io/projected/1f32b855-3964-4f2b-b958-57789ebc722a-kube-api-access-b44rr\") pod \"test-operator-controller-manager-56f8bfcd9f-fwfc2\" (UID: \"1f32b855-3964-4f2b-b958-57789ebc722a\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:14 crc kubenswrapper[4852]: I0129 10:58:14.987903 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:14.999910 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.011386 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.011432 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhxfd\" (UniqueName: \"kubernetes.io/projected/8950e863-95ea-40e3-a812-9df855499f77-kube-api-access-xhxfd\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.011471 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.011496 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h66m7\" (UniqueName: \"kubernetes.io/projected/b62831eb-f626-40b6-b332-1fef36357275-kube-api-access-h66m7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-ddd7p\" (UID: \"b62831eb-f626-40b6-b332-1fef36357275\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.011524 4852 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.011577 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:15.511559899 +0000 UTC m=+992.728891023 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "metrics-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.011622 4852 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.011660 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:15.511647951 +0000 UTC m=+992.728979085 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.011528 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vcjs\" (UniqueName: \"kubernetes.io/projected/fe66ce58-71f6-409f-a714-563da2885d40-kube-api-access-4vcjs\") pod \"watcher-operator-controller-manager-564965969-js7vz\" (UID: \"fe66ce58-71f6-409f-a714-563da2885d40\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.048112 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h66m7\" (UniqueName: \"kubernetes.io/projected/b62831eb-f626-40b6-b332-1fef36357275-kube-api-access-h66m7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-ddd7p\" (UID: \"b62831eb-f626-40b6-b332-1fef36357275\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.048670 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhxfd\" (UniqueName: \"kubernetes.io/projected/8950e863-95ea-40e3-a812-9df855499f77-kube-api-access-xhxfd\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.053608 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.070821 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vcjs\" (UniqueName: \"kubernetes.io/projected/fe66ce58-71f6-409f-a714-563da2885d40-kube-api-access-4vcjs\") pod \"watcher-operator-controller-manager-564965969-js7vz\" (UID: \"fe66ce58-71f6-409f-a714-563da2885d40\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.078289 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.115403 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.115576 4852 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.115691 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert podName:b6874ffc-31d8-431e-8792-7bcb511ed0fe nodeName:}" failed. No retries permitted until 2026-01-29 10:58:16.115676951 +0000 UTC m=+993.333008085 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" (UID: "b6874ffc-31d8-431e-8792-7bcb511ed0fe") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.160677 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.260195 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.261152 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.345476 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr"] Jan 29 10:58:15 crc kubenswrapper[4852]: W0129 10:58:15.490203 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7118d7b_d09a_405d_83f9_558e8d5895e1.slice/crio-47177fa768cbc09632b6785862a1e741fc4cc409f3c3c250e691da192844342d WatchSource:0}: Error finding container 47177fa768cbc09632b6785862a1e741fc4cc409f3c3c250e691da192844342d: Status 404 returned error can't find the container with id 47177fa768cbc09632b6785862a1e741fc4cc409f3c3c250e691da192844342d Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.533649 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.533687 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.534381 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.534484 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.535074 4852 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.535152 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:16.535132837 +0000 UTC m=+993.752463971 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.535420 4852 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.535461 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:16.535445864 +0000 UTC m=+993.752776998 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "metrics-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.576136 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.604484 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.756191 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft"] Jan 29 10:58:15 crc kubenswrapper[4852]: W0129 10:58:15.761718 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3f0b5e6_566b_4387_9c99_9b825afc6eec.slice/crio-265e634009b125e008162613c3e3d5f5e6b13c138246acc779f548a33210cf96 WatchSource:0}: Error finding container 265e634009b125e008162613c3e3d5f5e6b13c138246acc779f548a33210cf96: Status 404 returned error can't find the container with id 265e634009b125e008162613c3e3d5f5e6b13c138246acc779f548a33210cf96 Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.770371 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.842631 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.843663 4852 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.843723 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert podName:1b860bbd-067b-42e5-9c41-78cc915a0a4f nodeName:}" failed. No retries permitted until 2026-01-29 10:58:17.843706423 +0000 UTC m=+995.061037557 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert") pod "infra-operator-controller-manager-79955696d6-9c8t7" (UID: "1b860bbd-067b-42e5-9c41-78cc915a0a4f") : secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.920013 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.946021 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.952175 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x"] Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.964619 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88"] Jan 29 10:58:15 crc kubenswrapper[4852]: W0129 10:58:15.969209 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbdd4422_19ba_4b56_80d7_eb06aba3bab3.slice/crio-4738943f0b03ace7ad2d0a81bcd7578b7d2eda555e0a2a9afa4546b86001b362 WatchSource:0}: Error finding container 4738943f0b03ace7ad2d0a81bcd7578b7d2eda555e0a2a9afa4546b86001b362: Status 404 returned error can't find the container with id 4738943f0b03ace7ad2d0a81bcd7578b7d2eda555e0a2a9afa4546b86001b362 Jan 29 10:58:15 crc kubenswrapper[4852]: W0129 10:58:15.969891 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd32dc864_8a2e_429d_ada3_55137a72ea98.slice/crio-44e366e7f4699736dd1f259dedd1926776cdabbc1cec4c4af78731734158f467 WatchSource:0}: Error finding container 44e366e7f4699736dd1f259dedd1926776cdabbc1cec4c4af78731734158f467: Status 404 returned error can't find the container with id 44e366e7f4699736dd1f259dedd1926776cdabbc1cec4c4af78731734158f467 Jan 29 10:58:15 crc kubenswrapper[4852]: I0129 10:58:15.970429 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z"] Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.983075 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5mxhg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-55bff696bd-5rr5x_openstack-operators(d32dc864-8a2e-429d-ada3-55137a72ea98): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 10:58:15 crc kubenswrapper[4852]: E0129 10:58:15.984306 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" podUID="d32dc864-8a2e-429d-ada3-55137a72ea98" Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.020670 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" event={"ID":"17656c45-80cf-44cd-92a5-3b4c90e16e02","Type":"ContainerStarted","Data":"649f8bef6d48ddc689af8fcffbea03633dd57fdce8dee45e114085d82a872de7"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.023290 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" event={"ID":"0df531fa-b2f4-4122-b8e0-25fd0fb8df7b","Type":"ContainerStarted","Data":"d38df9f89ab946a1f214893f2cd70b1232857e23df6c0be2a151a29429ea13fd"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.024461 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" event={"ID":"53942d52-1e48-4496-bc9b-118126410877","Type":"ContainerStarted","Data":"27d341c935aefe939edc76d34fdb4fa6e1fa376709b26b3a30fc049035b61d14"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.025499 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" event={"ID":"37dab4de-5de7-41fc-9e1b-0b586a34f190","Type":"ContainerStarted","Data":"d107abc792c6d2046bb28ace768b06a5d751d02f8591c1ab5e1429bb67ab5965"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.027199 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" 
event={"ID":"3920c83a-21ab-417c-9d20-fac48cd65803","Type":"ContainerStarted","Data":"322a9c53c052787c2ff29f1fa13870265281884e20f47942399cd84fac227432"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.029481 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" event={"ID":"bbdd4422-19ba-4b56-80d7-eb06aba3bab3","Type":"ContainerStarted","Data":"4738943f0b03ace7ad2d0a81bcd7578b7d2eda555e0a2a9afa4546b86001b362"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.031075 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" event={"ID":"690fdb1f-20fe-4c9b-9297-7a4e3f623351","Type":"ContainerStarted","Data":"b58c46e5993de8b8102c970df89580576a1e904b708728fbd966018230474728"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.032704 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" event={"ID":"f3f0b5e6-566b-4387-9c99-9b825afc6eec","Type":"ContainerStarted","Data":"265e634009b125e008162613c3e3d5f5e6b13c138246acc779f548a33210cf96"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.034205 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" event={"ID":"d32dc864-8a2e-429d-ada3-55137a72ea98","Type":"ContainerStarted","Data":"44e366e7f4699736dd1f259dedd1926776cdabbc1cec4c4af78731734158f467"} Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.035543 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e\\\"\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" podUID="d32dc864-8a2e-429d-ada3-55137a72ea98" Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.036022 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" event={"ID":"d67255eb-825c-46cc-9deb-8b82ef97a888","Type":"ContainerStarted","Data":"8dc03226e34a5cf75ee897cd44c970a59e38ade3e33eca0b5b16de1862294586"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.038705 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" event={"ID":"a7118d7b-d09a-405d-83f9-558e8d5895e1","Type":"ContainerStarted","Data":"47177fa768cbc09632b6785862a1e741fc4cc409f3c3c250e691da192844342d"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.040621 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" event={"ID":"dab24f31-d338-430f-b891-680412052eb8","Type":"ContainerStarted","Data":"989376e4e8d466e5a214d3c2847ec9f57d37ef875a2382d430fe1f98de4ecce8"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.042848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" event={"ID":"01017d98-2eab-44db-8683-8a4ddd8f506f","Type":"ContainerStarted","Data":"68ea181025962a288e8d36fe7bdd20b241cd70126f1537ad1c381cf7bc32b181"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.044274 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" event={"ID":"0fb78196-264f-44ee-b16b-4a26e4317789","Type":"ContainerStarted","Data":"6e7c49556264a1eb90c8d86dc9a68eece4d960c9da9a5ab5b59db24caba2d1d4"} Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.103880 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr"] Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.111792 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7"] Jan 29 10:58:16 crc kubenswrapper[4852]: W0129 10:58:16.112712 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55348ba6_6217_4dc4_99c6_9a7521bddb93.slice/crio-1e51a63437fe9fb0b4e0822c3e005368d0113a7a673a1ae7e2b90f6b1f0a815c WatchSource:0}: Error finding container 1e51a63437fe9fb0b4e0822c3e005368d0113a7a673a1ae7e2b90f6b1f0a815c: Status 404 returned error can't find the container with id 1e51a63437fe9fb0b4e0822c3e005368d0113a7a673a1ae7e2b90f6b1f0a815c Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.118490 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9"] Jan 29 10:58:16 crc kubenswrapper[4852]: W0129 10:58:16.125149 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda046a54a_a5c0_4807_9048_76f8513e916d.slice/crio-767209a4f8c72c0ddb9a996b563251aea265a8f89c8ac9ded90304683dc4526b WatchSource:0}: Error finding container 767209a4f8c72c0ddb9a996b563251aea265a8f89c8ac9ded90304683dc4526b: Status 404 returned error can't find the container with id 767209a4f8c72c0ddb9a996b563251aea265a8f89c8ac9ded90304683dc4526b Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.126891 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g4knk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-67bf948998-74mf9_openstack-operators(a046a54a-a5c0-4807-9048-76f8513e916d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.127465 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2xtrr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-64b5b76f97-h5rtr_openstack-operators(c28733de-04a8-4eca-a143-cc129e122b7b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 10:58:16 crc 
kubenswrapper[4852]: E0129 10:58:16.128314 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" podUID="a046a54a-a5c0-4807-9048-76f8513e916d" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.130719 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" podUID="c28733de-04a8-4eca-a143-cc129e122b7b" Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.150000 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.150103 4852 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.150147 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert podName:b6874ffc-31d8-431e-8792-7bcb511ed0fe nodeName:}" failed. No retries permitted until 2026-01-29 10:58:18.150135457 +0000 UTC m=+995.367466591 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" (UID: "b6874ffc-31d8-431e-8792-7bcb511ed0fe") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.252192 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p"] Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.261074 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2"] Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.271981 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-js7vz"] Jan 29 10:58:16 crc kubenswrapper[4852]: W0129 10:58:16.276720 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f32b855_3964_4f2b_b958_57789ebc722a.slice/crio-bb57240268cdfdd658849a19c6c8e6bbc92d93eac62ff4afaa1560e7a6a6d197 WatchSource:0}: Error finding container bb57240268cdfdd658849a19c6c8e6bbc92d93eac62ff4afaa1560e7a6a6d197: Status 404 returned error can't find the container with id bb57240268cdfdd658849a19c6c8e6bbc92d93eac62ff4afaa1560e7a6a6d197 Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.290703 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b44rr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-56f8bfcd9f-fwfc2_openstack-operators(1f32b855-3964-4f2b-b958-57789ebc722a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.292460 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" podUID="1f32b855-3964-4f2b-b958-57789ebc722a" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.319970 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4vcjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-564965969-js7vz_openstack-operators(fe66ce58-71f6-409f-a714-563da2885d40): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.324473 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" podUID="fe66ce58-71f6-409f-a714-563da2885d40" Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.558208 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:16 crc kubenswrapper[4852]: I0129 10:58:16.558289 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.560652 4852 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.560724 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:18.560703205 +0000 UTC m=+995.778034409 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "metrics-server-cert" not found Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.561232 4852 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 10:58:16 crc kubenswrapper[4852]: E0129 10:58:16.561270 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:18.561259419 +0000 UTC m=+995.778590693 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "webhook-server-cert" not found Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.058256 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" event={"ID":"c28733de-04a8-4eca-a143-cc129e122b7b","Type":"ContainerStarted","Data":"448f75116136e7c3f1098cb85e57e5960a8a8b0a941d35a0573fad05fb8c6f05"} Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.060697 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" podUID="c28733de-04a8-4eca-a143-cc129e122b7b" Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.063862 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" event={"ID":"55348ba6-6217-4dc4-99c6-9a7521bddb93","Type":"ContainerStarted","Data":"1e51a63437fe9fb0b4e0822c3e005368d0113a7a673a1ae7e2b90f6b1f0a815c"} Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.089081 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" event={"ID":"a046a54a-a5c0-4807-9048-76f8513e916d","Type":"ContainerStarted","Data":"767209a4f8c72c0ddb9a996b563251aea265a8f89c8ac9ded90304683dc4526b"} Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.090515 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" podUID="a046a54a-a5c0-4807-9048-76f8513e916d" Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.092548 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" event={"ID":"b62831eb-f626-40b6-b332-1fef36357275","Type":"ContainerStarted","Data":"88e6df5b525c69882e96e83adc0d667031b9bce65eed4660058854822b8ffadc"} Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.095997 
4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" event={"ID":"1f32b855-3964-4f2b-b958-57789ebc722a","Type":"ContainerStarted","Data":"bb57240268cdfdd658849a19c6c8e6bbc92d93eac62ff4afaa1560e7a6a6d197"} Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.097598 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" podUID="1f32b855-3964-4f2b-b958-57789ebc722a" Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.098644 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" event={"ID":"fe66ce58-71f6-409f-a714-563da2885d40","Type":"ContainerStarted","Data":"b01f46e74ef7e868fcddc3fe0a5a6ba5c84a48e8c3f47b10deec5f0e01a8233d"} Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.099650 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" podUID="fe66ce58-71f6-409f-a714-563da2885d40" Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.099953 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e\\\"\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" podUID="d32dc864-8a2e-429d-ada3-55137a72ea98" Jan 29 10:58:17 crc kubenswrapper[4852]: I0129 10:58:17.895695 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.895849 4852 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:17 crc kubenswrapper[4852]: E0129 10:58:17.896202 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert podName:1b860bbd-067b-42e5-9c41-78cc915a0a4f nodeName:}" failed. No retries permitted until 2026-01-29 10:58:21.896184533 +0000 UTC m=+999.113515667 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert") pod "infra-operator-controller-manager-79955696d6-9c8t7" (UID: "1b860bbd-067b-42e5-9c41-78cc915a0a4f") : secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.104957 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" podUID="c28733de-04a8-4eca-a143-cc129e122b7b" Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.105230 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" podUID="fe66ce58-71f6-409f-a714-563da2885d40" Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.105262 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" podUID="1f32b855-3964-4f2b-b958-57789ebc722a" Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.108685 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" podUID="a046a54a-a5c0-4807-9048-76f8513e916d" Jan 29 10:58:18 crc kubenswrapper[4852]: I0129 10:58:18.200662 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.201546 4852 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.201633 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert podName:b6874ffc-31d8-431e-8792-7bcb511ed0fe nodeName:}" failed. No retries permitted until 2026-01-29 10:58:22.201571802 +0000 UTC m=+999.418902936 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" (UID: "b6874ffc-31d8-431e-8792-7bcb511ed0fe") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:18 crc kubenswrapper[4852]: I0129 10:58:18.608324 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:18 crc kubenswrapper[4852]: I0129 10:58:18.609287 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.609198 4852 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.609503 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:22.609488736 +0000 UTC m=+999.826819860 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "metrics-server-cert" not found Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.609434 4852 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 10:58:18 crc kubenswrapper[4852]: E0129 10:58:18.609866 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:22.609857694 +0000 UTC m=+999.827188818 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "webhook-server-cert" not found Jan 29 10:58:21 crc kubenswrapper[4852]: I0129 10:58:21.967811 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:21 crc kubenswrapper[4852]: E0129 10:58:21.968034 4852 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:21 crc kubenswrapper[4852]: E0129 10:58:21.968294 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert podName:1b860bbd-067b-42e5-9c41-78cc915a0a4f nodeName:}" failed. No retries permitted until 2026-01-29 10:58:29.968268692 +0000 UTC m=+1007.185599886 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert") pod "infra-operator-controller-manager-79955696d6-9c8t7" (UID: "1b860bbd-067b-42e5-9c41-78cc915a0a4f") : secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:22 crc kubenswrapper[4852]: I0129 10:58:22.272212 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:22 crc kubenswrapper[4852]: E0129 10:58:22.272408 4852 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:22 crc kubenswrapper[4852]: E0129 10:58:22.272486 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert podName:b6874ffc-31d8-431e-8792-7bcb511ed0fe nodeName:}" failed. No retries permitted until 2026-01-29 10:58:30.272468921 +0000 UTC m=+1007.489800055 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" (UID: "b6874ffc-31d8-431e-8792-7bcb511ed0fe") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 10:58:22 crc kubenswrapper[4852]: I0129 10:58:22.677295 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:22 crc kubenswrapper[4852]: I0129 10:58:22.677424 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:22 crc kubenswrapper[4852]: E0129 10:58:22.677495 4852 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 10:58:22 crc kubenswrapper[4852]: E0129 10:58:22.677544 4852 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 10:58:22 crc kubenswrapper[4852]: E0129 10:58:22.677599 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:30.677561345 +0000 UTC m=+1007.894892549 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "webhook-server-cert" not found Jan 29 10:58:22 crc kubenswrapper[4852]: E0129 10:58:22.677665 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs podName:8950e863-95ea-40e3-a812-9df855499f77 nodeName:}" failed. No retries permitted until 2026-01-29 10:58:30.677648987 +0000 UTC m=+1007.894980111 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs") pod "openstack-operator-controller-manager-58857b9c54-qhkm5" (UID: "8950e863-95ea-40e3-a812-9df855499f77") : secret "metrics-server-cert" not found Jan 29 10:58:28 crc kubenswrapper[4852]: E0129 10:58:28.692192 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4" Jan 29 10:58:28 crc kubenswrapper[4852]: E0129 10:58:28.694226 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9wmxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-788c46999f-5cp9z_openstack-operators(37dab4de-5de7-41fc-9e1b-0b586a34f190): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:28 crc kubenswrapper[4852]: E0129 10:58:28.695664 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" 
podUID="37dab4de-5de7-41fc-9e1b-0b586a34f190" Jan 29 10:58:29 crc kubenswrapper[4852]: E0129 10:58:29.177274 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" podUID="37dab4de-5de7-41fc-9e1b-0b586a34f190" Jan 29 10:58:29 crc kubenswrapper[4852]: I0129 10:58:29.465705 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.000683 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.000929 4852 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.001067 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert podName:1b860bbd-067b-42e5-9c41-78cc915a0a4f nodeName:}" failed. No retries permitted until 2026-01-29 10:58:46.001046886 +0000 UTC m=+1023.218378020 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert") pod "infra-operator-controller-manager-79955696d6-9c8t7" (UID: "1b860bbd-067b-42e5-9c41-78cc915a0a4f") : secret "infra-operator-webhook-server-cert" not found Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.114964 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.115181 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rzmr4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-69d6db494d-lg5cr_openstack-operators(d67255eb-825c-46cc-9deb-8b82ef97a888): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.116316 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" podUID="d67255eb-825c-46cc-9deb-8b82ef97a888" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.183239 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10\\\"\"" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" podUID="d67255eb-825c-46cc-9deb-8b82ef97a888" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.305214 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.311144 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6874ffc-31d8-431e-8792-7bcb511ed0fe-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc\" (UID: \"b6874ffc-31d8-431e-8792-7bcb511ed0fe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.572430 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.712181 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.712310 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.722436 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-webhook-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.722482 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8950e863-95ea-40e3-a812-9df855499f77-metrics-certs\") pod \"openstack-operator-controller-manager-58857b9c54-qhkm5\" (UID: \"8950e863-95ea-40e3-a812-9df855499f77\") " pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:30 crc kubenswrapper[4852]: I0129 10:58:30.806816 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.827444 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.827841 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xpb2q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-6687f8d877-bdlbj_openstack-operators(bbdd4422-19ba-4b56-80d7-eb06aba3bab3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:30 crc kubenswrapper[4852]: E0129 10:58:30.829517 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" podUID="bbdd4422-19ba-4b56-80d7-eb06aba3bab3" Jan 29 10:58:31 crc kubenswrapper[4852]: E0129 10:58:31.193910 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" podUID="bbdd4422-19ba-4b56-80d7-eb06aba3bab3" Jan 29 10:58:31 crc kubenswrapper[4852]: E0129 10:58:31.500993 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:6e21a1dda86ba365817102d23a5d4d2d5dcd1c4d8e5f8d74bd24548aa8c63898" Jan 29 10:58:31 crc kubenswrapper[4852]: E0129 10:58:31.501264 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:6e21a1dda86ba365817102d23a5d4d2d5dcd1c4d8e5f8d74bd24548aa8c63898,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l7nq4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-8d874c8fc-pz8mg_openstack-operators(0df531fa-b2f4-4122-b8e0-25fd0fb8df7b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:31 crc kubenswrapper[4852]: E0129 10:58:31.502731 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" podUID="0df531fa-b2f4-4122-b8e0-25fd0fb8df7b" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 10:58:32.199714 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:6e21a1dda86ba365817102d23a5d4d2d5dcd1c4d8e5f8d74bd24548aa8c63898\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" podUID="0df531fa-b2f4-4122-b8e0-25fd0fb8df7b" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 10:58:32.228281 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 10:58:32.228452 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5hfjx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-5f4b8bd54d-db6p9_openstack-operators(53942d52-1e48-4496-bc9b-118126410877): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 
10:58:32.229742 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" podUID="53942d52-1e48-4496-bc9b-118126410877" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 10:58:32.885002 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 10:58:32.885225 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z5vhs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-585dbc889-vrw88_openstack-operators(3920c83a-21ab-417c-9d20-fac48cd65803): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:32 crc kubenswrapper[4852]: E0129 10:58:32.886459 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" podUID="3920c83a-21ab-417c-9d20-fac48cd65803" Jan 29 10:58:33 crc kubenswrapper[4852]: E0129 10:58:33.206655 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" podUID="3920c83a-21ab-417c-9d20-fac48cd65803" Jan 29 10:58:33 crc kubenswrapper[4852]: E0129 10:58:33.207089 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" podUID="53942d52-1e48-4496-bc9b-118126410877" Jan 29 10:58:33 crc kubenswrapper[4852]: E0129 10:58:33.671779 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8" Jan 29 10:58:33 crc kubenswrapper[4852]: E0129 10:58:33.672193 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9t4wr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5fb775575f-7wjg2_openstack-operators(01017d98-2eab-44db-8683-8a4ddd8f506f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:33 crc kubenswrapper[4852]: E0129 10:58:33.674145 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" podUID="01017d98-2eab-44db-8683-8a4ddd8f506f" Jan 29 10:58:34 crc kubenswrapper[4852]: E0129 10:58:34.209881 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" podUID="01017d98-2eab-44db-8683-8a4ddd8f506f" Jan 29 10:58:35 crc kubenswrapper[4852]: E0129 10:58:35.764931 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 29 10:58:35 crc kubenswrapper[4852]: E0129 10:58:35.765506 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h66m7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-ddd7p_openstack-operators(b62831eb-f626-40b6-b332-1fef36357275): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:58:35 crc kubenswrapper[4852]: E0129 10:58:35.766974 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" podUID="b62831eb-f626-40b6-b332-1fef36357275" Jan 29 10:58:36 crc kubenswrapper[4852]: E0129 10:58:36.225438 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" podUID="b62831eb-f626-40b6-b332-1fef36357275" Jan 29 10:58:38 crc kubenswrapper[4852]: I0129 10:58:38.319358 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5"] Jan 29 10:58:38 crc kubenswrapper[4852]: I0129 10:58:38.362477 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc"] Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.246895 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" event={"ID":"c28733de-04a8-4eca-a143-cc129e122b7b","Type":"ContainerStarted","Data":"41a10d2082c80dc774b40a6395c16785f0640a43767d615e9711db240b77ad01"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.248083 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.250731 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" event={"ID":"dab24f31-d338-430f-b891-680412052eb8","Type":"ContainerStarted","Data":"416fb06582619618d894c074bb2d3f25a78cf5385e6f1289932904b92802bb1a"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.251162 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:39 crc kubenswrapper[4852]: 
I0129 10:58:39.252824 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" event={"ID":"55348ba6-6217-4dc4-99c6-9a7521bddb93","Type":"ContainerStarted","Data":"0e702ffc23e83824c24a03a2ddbb9404a9e95dc2331c4cf637e5ef49d6673f8b"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.253165 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.265170 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" event={"ID":"a046a54a-a5c0-4807-9048-76f8513e916d","Type":"ContainerStarted","Data":"0286000956d59e2e9001c663a81f4c7988acb5452ec7c48907f0b7b44e4d6220"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.265987 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.270748 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" podStartSLOduration=3.473383787 podStartE2EDuration="25.270728551s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:16.126950192 +0000 UTC m=+993.344281316" lastFinishedPulling="2026-01-29 10:58:37.924294946 +0000 UTC m=+1015.141626080" observedRunningTime="2026-01-29 10:58:39.267148654 +0000 UTC m=+1016.484479818" watchObservedRunningTime="2026-01-29 10:58:39.270728551 +0000 UTC m=+1016.488059685" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.282102 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" event={"ID":"f3f0b5e6-566b-4387-9c99-9b825afc6eec","Type":"ContainerStarted","Data":"d8ef3a2dd9cd3457ac64d5b49dfa81dd7cd8653768ec79477b78ebd658ac30f2"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.282829 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.297531 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" podStartSLOduration=3.478981123 podStartE2EDuration="25.297506575s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:16.126764237 +0000 UTC m=+993.344095371" lastFinishedPulling="2026-01-29 10:58:37.945289689 +0000 UTC m=+1015.162620823" observedRunningTime="2026-01-29 10:58:39.290554735 +0000 UTC m=+1016.507885879" watchObservedRunningTime="2026-01-29 10:58:39.297506575 +0000 UTC m=+1016.514837709" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.297860 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" event={"ID":"8950e863-95ea-40e3-a812-9df855499f77","Type":"ContainerStarted","Data":"32f7b8323c80e9825db90eb003bd626b4c622ab7c287c019beb2e1ac2ed3af92"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.297906 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" 
event={"ID":"8950e863-95ea-40e3-a812-9df855499f77","Type":"ContainerStarted","Data":"25fcedfa0eb5c20f5c16445c04a51c5e82da2738b0b94397826b33f1fdeed3d7"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.297990 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.302362 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" event={"ID":"d32dc864-8a2e-429d-ada3-55137a72ea98","Type":"ContainerStarted","Data":"3915ea7f73659105087dccaee0db61fdb2942a47d1e2cb21e19e4964b1ea8c0b"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.302743 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.319147 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" event={"ID":"a7118d7b-d09a-405d-83f9-558e8d5895e1","Type":"ContainerStarted","Data":"798cef52e78534ced1bbe64d63e8a01a40e72da5f876dabb520f341df5d2301f"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.319308 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.327200 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" podStartSLOduration=6.233586619 podStartE2EDuration="26.327180941s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.649765346 +0000 UTC m=+992.867096480" lastFinishedPulling="2026-01-29 10:58:35.743359678 +0000 UTC m=+1012.960690802" observedRunningTime="2026-01-29 10:58:39.321170793 +0000 UTC m=+1016.538501937" watchObservedRunningTime="2026-01-29 10:58:39.327180941 +0000 UTC m=+1016.544512085" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.328372 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" event={"ID":"0fb78196-264f-44ee-b16b-4a26e4317789","Type":"ContainerStarted","Data":"5769f5c3162a9aea5d6b90d25b5ceee1c7e94635a4d4cf1e5cb20fef2089dc58"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.329117 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.342043 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" event={"ID":"17656c45-80cf-44cd-92a5-3b4c90e16e02","Type":"ContainerStarted","Data":"fb25e0685f0f83080ce01cdf963fd07e74598df03d134c21b95fbef745d4799f"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.342139 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.343450 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" 
event={"ID":"b6874ffc-31d8-431e-8792-7bcb511ed0fe","Type":"ContainerStarted","Data":"bd93a4e992f3d7b795dda3f1d911a8a2b01b2354f0b1bef26cd070e1e59a53d6"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.344834 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" podStartSLOduration=5.216004658 podStartE2EDuration="25.344813101s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:16.115509021 +0000 UTC m=+993.332840155" lastFinishedPulling="2026-01-29 10:58:36.244317464 +0000 UTC m=+1013.461648598" observedRunningTime="2026-01-29 10:58:39.338770034 +0000 UTC m=+1016.556101168" watchObservedRunningTime="2026-01-29 10:58:39.344813101 +0000 UTC m=+1016.562144235" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.346617 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" event={"ID":"fe66ce58-71f6-409f-a714-563da2885d40","Type":"ContainerStarted","Data":"bd66079432bb26d97f5843df864eecd0da204c493efd98edeb6e3fc592463e15"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.347260 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.348961 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" event={"ID":"690fdb1f-20fe-4c9b-9297-7a4e3f623351","Type":"ContainerStarted","Data":"9f73230b3d8f583f03b8c3ca6b71293dfe1349a2448eb6f457fa31d1f3322e5a"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.349064 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.350567 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" event={"ID":"1f32b855-3964-4f2b-b958-57789ebc722a","Type":"ContainerStarted","Data":"64cff71c985083d5699bbdab662bb5fe66e804e0c079f5d72f32e917c25643fd"} Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.350762 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.414409 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" podStartSLOduration=25.41438467 podStartE2EDuration="25.41438467s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:58:39.407505832 +0000 UTC m=+1016.624836966" watchObservedRunningTime="2026-01-29 10:58:39.41438467 +0000 UTC m=+1016.631715804" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.431361 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" podStartSLOduration=5.684371624 podStartE2EDuration="26.431339434s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.497371214 +0000 UTC m=+992.714702348" lastFinishedPulling="2026-01-29 10:58:36.244339024 
+0000 UTC m=+1013.461670158" observedRunningTime="2026-01-29 10:58:39.42950844 +0000 UTC m=+1016.646839574" watchObservedRunningTime="2026-01-29 10:58:39.431339434 +0000 UTC m=+1016.648670568" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.452485 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" podStartSLOduration=5.421128294 podStartE2EDuration="26.45246712s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.213192703 +0000 UTC m=+992.430523837" lastFinishedPulling="2026-01-29 10:58:36.244531509 +0000 UTC m=+1013.461862663" observedRunningTime="2026-01-29 10:58:39.448489903 +0000 UTC m=+1016.665821037" watchObservedRunningTime="2026-01-29 10:58:39.45246712 +0000 UTC m=+1016.669798254" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.487289 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" podStartSLOduration=6.006192885 podStartE2EDuration="26.487273411s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.763234558 +0000 UTC m=+992.980565692" lastFinishedPulling="2026-01-29 10:58:36.244315084 +0000 UTC m=+1013.461646218" observedRunningTime="2026-01-29 10:58:39.482197306 +0000 UTC m=+1016.699528440" watchObservedRunningTime="2026-01-29 10:58:39.487273411 +0000 UTC m=+1016.704604545" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.510128 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" podStartSLOduration=3.509819256 podStartE2EDuration="25.510112608s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.98277583 +0000 UTC m=+993.200106964" lastFinishedPulling="2026-01-29 10:58:37.983069182 +0000 UTC m=+1015.200400316" observedRunningTime="2026-01-29 10:58:39.508873968 +0000 UTC m=+1016.726205102" watchObservedRunningTime="2026-01-29 10:58:39.510112608 +0000 UTC m=+1016.727443742" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.533986 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" podStartSLOduration=3.929666161 podStartE2EDuration="25.533969001s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:16.31976887 +0000 UTC m=+993.537100004" lastFinishedPulling="2026-01-29 10:58:37.92407171 +0000 UTC m=+1015.141402844" observedRunningTime="2026-01-29 10:58:39.528915048 +0000 UTC m=+1016.746246182" watchObservedRunningTime="2026-01-29 10:58:39.533969001 +0000 UTC m=+1016.751300135" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.549175 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" podStartSLOduration=3.884784425 podStartE2EDuration="25.549157662s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:16.290312051 +0000 UTC m=+993.507643175" lastFinishedPulling="2026-01-29 10:58:37.954685278 +0000 UTC m=+1015.172016412" observedRunningTime="2026-01-29 10:58:39.546342203 +0000 UTC m=+1016.763673337" watchObservedRunningTime="2026-01-29 10:58:39.549157662 +0000 UTC m=+1016.766488796" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.561368 4852 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" podStartSLOduration=5.256801926 podStartE2EDuration="25.56135223s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.938843627 +0000 UTC m=+993.156174761" lastFinishedPulling="2026-01-29 10:58:36.243393911 +0000 UTC m=+1013.460725065" observedRunningTime="2026-01-29 10:58:39.560012177 +0000 UTC m=+1016.777343311" watchObservedRunningTime="2026-01-29 10:58:39.56135223 +0000 UTC m=+1016.778683364" Jan 29 10:58:39 crc kubenswrapper[4852]: I0129 10:58:39.581736 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" podStartSLOduration=5.679754342 podStartE2EDuration="26.581717737s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.34281367 +0000 UTC m=+992.560144804" lastFinishedPulling="2026-01-29 10:58:36.244777065 +0000 UTC m=+1013.462108199" observedRunningTime="2026-01-29 10:58:39.579885702 +0000 UTC m=+1016.797216846" watchObservedRunningTime="2026-01-29 10:58:39.581717737 +0000 UTC m=+1016.799048871" Jan 29 10:58:41 crc kubenswrapper[4852]: I0129 10:58:41.365704 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" event={"ID":"b6874ffc-31d8-431e-8792-7bcb511ed0fe","Type":"ContainerStarted","Data":"3984ddb8f5c1051e79daa872892c60a990d2e291228f5576edab1d3b97a1525d"} Jan 29 10:58:41 crc kubenswrapper[4852]: I0129 10:58:41.401616 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" podStartSLOduration=24.87229794 podStartE2EDuration="27.401569886s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:38.386157936 +0000 UTC m=+1015.603489070" lastFinishedPulling="2026-01-29 10:58:40.915429882 +0000 UTC m=+1018.132761016" observedRunningTime="2026-01-29 10:58:41.392522035 +0000 UTC m=+1018.609853189" watchObservedRunningTime="2026-01-29 10:58:41.401569886 +0000 UTC m=+1018.618901040" Jan 29 10:58:42 crc kubenswrapper[4852]: I0129 10:58:42.371868 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.008568 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-86gx6"] Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.014993 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.022240 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d27zx\" (UniqueName: \"kubernetes.io/projected/77d39ece-605f-452a-adee-ef91bfcbc5c3-kube-api-access-d27zx\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.022290 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-utilities\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.022363 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-catalog-content\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.022711 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-86gx6"] Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.123032 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d27zx\" (UniqueName: \"kubernetes.io/projected/77d39ece-605f-452a-adee-ef91bfcbc5c3-kube-api-access-d27zx\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.123308 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-utilities\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.123390 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-catalog-content\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.123811 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-utilities\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.123903 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-catalog-content\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.146998 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d27zx\" (UniqueName: \"kubernetes.io/projected/77d39ece-605f-452a-adee-ef91bfcbc5c3-kube-api-access-d27zx\") pod \"community-operators-86gx6\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.331535 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.390549 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" event={"ID":"37dab4de-5de7-41fc-9e1b-0b586a34f190","Type":"ContainerStarted","Data":"4f28034702d2977ca3d0ec037373afb851161964abf84f01b05535e4c2c216ff"} Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.391211 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.513484 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" podStartSLOduration=2.625677852 podStartE2EDuration="29.513460137s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.981266433 +0000 UTC m=+993.198597567" lastFinishedPulling="2026-01-29 10:58:42.869048718 +0000 UTC m=+1020.086379852" observedRunningTime="2026-01-29 10:58:43.4128439 +0000 UTC m=+1020.630175034" watchObservedRunningTime="2026-01-29 10:58:43.513460137 +0000 UTC m=+1020.730791271" Jan 29 10:58:43 crc kubenswrapper[4852]: I0129 10:58:43.868067 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-86gx6"] Jan 29 10:58:43 crc kubenswrapper[4852]: W0129 10:58:43.879514 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77d39ece_605f_452a_adee_ef91bfcbc5c3.slice/crio-b6f9b0631b62869ee0a9870fcd0aef9dfeb888036f1176cc61f6c3feaae62e01 WatchSource:0}: Error finding container b6f9b0631b62869ee0a9870fcd0aef9dfeb888036f1176cc61f6c3feaae62e01: Status 404 returned error can't find the container with id b6f9b0631b62869ee0a9870fcd0aef9dfeb888036f1176cc61f6c3feaae62e01 Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.170845 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-rqdx7" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.211884 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-jgnnf" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.319696 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-z7mxp" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.399286 4852 generic.go:334] "Generic (PLEG): container finished" podID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerID="2456b48d7f3bbe53f2a0d9868f8d27927f371238ceb0a8bcd7ec55245fcd6ad0" exitCode=0 Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.399379 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" 
event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerDied","Data":"2456b48d7f3bbe53f2a0d9868f8d27927f371238ceb0a8bcd7ec55245fcd6ad0"} Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.399421 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerStarted","Data":"b6f9b0631b62869ee0a9870fcd0aef9dfeb888036f1176cc61f6c3feaae62e01"} Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.402364 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" event={"ID":"53942d52-1e48-4496-bc9b-118126410877","Type":"ContainerStarted","Data":"ce865f1f74fd56bb558ec4eac2927e9fac1f6045f4fb4755907e9dba60d7371f"} Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.402757 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.471874 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" podStartSLOduration=3.018503922 podStartE2EDuration="31.471852795s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.620964253 +0000 UTC m=+992.838295387" lastFinishedPulling="2026-01-29 10:58:44.074313116 +0000 UTC m=+1021.291644260" observedRunningTime="2026-01-29 10:58:44.466893324 +0000 UTC m=+1021.684224458" watchObservedRunningTime="2026-01-29 10:58:44.471852795 +0000 UTC m=+1021.689183929" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.586905 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-9cqpx" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.599084 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-h67ft" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.616110 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-74mf9" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.637065 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-5rr5x" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.693767 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-n75l6" Jan 29 10:58:44 crc kubenswrapper[4852]: I0129 10:58:44.990849 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-5xcj7" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.005918 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-h5rtr" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.059821 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-fwfc2" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.170390 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/watcher-operator-controller-manager-564965969-js7vz" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.409231 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" event={"ID":"0df531fa-b2f4-4122-b8e0-25fd0fb8df7b","Type":"ContainerStarted","Data":"0f7573a23d20ce429f1ebdc8da4aa9016291e87c68419aa9aa032e471fce2b32"} Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.409426 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.410878 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" event={"ID":"d67255eb-825c-46cc-9deb-8b82ef97a888","Type":"ContainerStarted","Data":"420ce2ad99fd2f4d7b35c5b18267b408c4edea1f5cc8364acd2d55b60f269da6"} Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.411104 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.412603 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" event={"ID":"bbdd4422-19ba-4b56-80d7-eb06aba3bab3","Type":"ContainerStarted","Data":"2f07e16a7b05cc59faa25772a22ed1fb006d82a39c53044bde883e02d503d852"} Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.412836 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.414502 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerStarted","Data":"75fb89f63a7f64b83160e5b96c1c9b7281dab57b0d4ce290a0d72770b36776c8"} Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.430769 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" podStartSLOduration=3.133738738 podStartE2EDuration="32.430753776s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.549071337 +0000 UTC m=+992.766402471" lastFinishedPulling="2026-01-29 10:58:44.846086375 +0000 UTC m=+1022.063417509" observedRunningTime="2026-01-29 10:58:45.425280012 +0000 UTC m=+1022.642611166" watchObservedRunningTime="2026-01-29 10:58:45.430753776 +0000 UTC m=+1022.648084900" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.443237 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" podStartSLOduration=2.999485288 podStartE2EDuration="32.44321869s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.475325046 +0000 UTC m=+992.692656180" lastFinishedPulling="2026-01-29 10:58:44.919058448 +0000 UTC m=+1022.136389582" observedRunningTime="2026-01-29 10:58:45.439484459 +0000 UTC m=+1022.656815593" watchObservedRunningTime="2026-01-29 10:58:45.44321869 +0000 UTC m=+1022.660549824" Jan 29 10:58:45 crc kubenswrapper[4852]: I0129 10:58:45.475796 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" podStartSLOduration=2.533404839 podStartE2EDuration="31.475779365s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.977559033 +0000 UTC m=+993.194890167" lastFinishedPulling="2026-01-29 10:58:44.919933559 +0000 UTC m=+1022.137264693" observedRunningTime="2026-01-29 10:58:45.472317561 +0000 UTC m=+1022.689648695" watchObservedRunningTime="2026-01-29 10:58:45.475779365 +0000 UTC m=+1022.693110499" Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.064148 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.075361 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1b860bbd-067b-42e5-9c41-78cc915a0a4f-cert\") pod \"infra-operator-controller-manager-79955696d6-9c8t7\" (UID: \"1b860bbd-067b-42e5-9c41-78cc915a0a4f\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.257371 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-gznrf" Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.266532 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.425756 4852 generic.go:334] "Generic (PLEG): container finished" podID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerID="75fb89f63a7f64b83160e5b96c1c9b7281dab57b0d4ce290a0d72770b36776c8" exitCode=0 Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.425931 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerDied","Data":"75fb89f63a7f64b83160e5b96c1c9b7281dab57b0d4ce290a0d72770b36776c8"} Jan 29 10:58:46 crc kubenswrapper[4852]: I0129 10:58:46.631570 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7"] Jan 29 10:58:47 crc kubenswrapper[4852]: I0129 10:58:47.434346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" event={"ID":"1b860bbd-067b-42e5-9c41-78cc915a0a4f","Type":"ContainerStarted","Data":"0329925b2cbcd4b8112d1dff14f65d8faf2496ec15d7207d58381693da252c62"} Jan 29 10:58:47 crc kubenswrapper[4852]: I0129 10:58:47.437271 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerStarted","Data":"1ec645c568acb37b86cb656eea18ddea47a0941ac83ba25a0a7aed2529ff2ace"} Jan 29 10:58:47 crc kubenswrapper[4852]: I0129 10:58:47.460443 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-86gx6" podStartSLOduration=2.997896392 podStartE2EDuration="5.460422838s" podCreationTimestamp="2026-01-29 10:58:42 +0000 UTC" 
firstStartedPulling="2026-01-29 10:58:44.400756518 +0000 UTC m=+1021.618087652" lastFinishedPulling="2026-01-29 10:58:46.863282954 +0000 UTC m=+1024.080614098" observedRunningTime="2026-01-29 10:58:47.453988732 +0000 UTC m=+1024.671319866" watchObservedRunningTime="2026-01-29 10:58:47.460422838 +0000 UTC m=+1024.677753972" Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.455940 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" event={"ID":"01017d98-2eab-44db-8683-8a4ddd8f506f","Type":"ContainerStarted","Data":"a4f51f73b6e940a50e7acb91710da8ec69fccd3cac37b2dd6a1f034b813dfa32"} Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.456825 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.458248 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" event={"ID":"1b860bbd-067b-42e5-9c41-78cc915a0a4f","Type":"ContainerStarted","Data":"52f971d0ca292014b0bb7534e1ba8476146efdf24cc7753a58fef9cc55b20c21"} Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.458397 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.460649 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" event={"ID":"3920c83a-21ab-417c-9d20-fac48cd65803","Type":"ContainerStarted","Data":"a2eb042bc66b393c025db268fb009f686104e9cd553dd00fe21925be7d8de813"} Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.460930 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.484823 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" podStartSLOduration=3.649941835 podStartE2EDuration="36.484801173s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.764508739 +0000 UTC m=+992.981839873" lastFinishedPulling="2026-01-29 10:58:48.599368087 +0000 UTC m=+1025.816699211" observedRunningTime="2026-01-29 10:58:49.480814655 +0000 UTC m=+1026.698145789" watchObservedRunningTime="2026-01-29 10:58:49.484801173 +0000 UTC m=+1026.702132307" Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.501957 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" podStartSLOduration=2.596707526 podStartE2EDuration="35.501938572s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:15.980447163 +0000 UTC m=+993.197778297" lastFinishedPulling="2026-01-29 10:58:48.885678209 +0000 UTC m=+1026.103009343" observedRunningTime="2026-01-29 10:58:49.496914429 +0000 UTC m=+1026.714245563" watchObservedRunningTime="2026-01-29 10:58:49.501938572 +0000 UTC m=+1026.719269706" Jan 29 10:58:49 crc kubenswrapper[4852]: I0129 10:58:49.512971 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" 
podStartSLOduration=34.585800091 podStartE2EDuration="36.51295065s" podCreationTimestamp="2026-01-29 10:58:13 +0000 UTC" firstStartedPulling="2026-01-29 10:58:46.640686627 +0000 UTC m=+1023.858017761" lastFinishedPulling="2026-01-29 10:58:48.567837186 +0000 UTC m=+1025.785168320" observedRunningTime="2026-01-29 10:58:49.510572463 +0000 UTC m=+1026.727903597" watchObservedRunningTime="2026-01-29 10:58:49.51295065 +0000 UTC m=+1026.730281784" Jan 29 10:58:50 crc kubenswrapper[4852]: I0129 10:58:50.468214 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" event={"ID":"b62831eb-f626-40b6-b332-1fef36357275","Type":"ContainerStarted","Data":"6f494918e70be146dcaa339e1a78fa3956e43a9edb90e51cd6bd5a498c803019"} Jan 29 10:58:50 crc kubenswrapper[4852]: I0129 10:58:50.489173 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-ddd7p" podStartSLOduration=2.830608328 podStartE2EDuration="36.489150614s" podCreationTimestamp="2026-01-29 10:58:14 +0000 UTC" firstStartedPulling="2026-01-29 10:58:16.290297771 +0000 UTC m=+993.507628905" lastFinishedPulling="2026-01-29 10:58:49.948840057 +0000 UTC m=+1027.166171191" observedRunningTime="2026-01-29 10:58:50.482369708 +0000 UTC m=+1027.699700862" watchObservedRunningTime="2026-01-29 10:58:50.489150614 +0000 UTC m=+1027.706481758" Jan 29 10:58:50 crc kubenswrapper[4852]: I0129 10:58:50.579573 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc" Jan 29 10:58:50 crc kubenswrapper[4852]: I0129 10:58:50.813574 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-58857b9c54-qhkm5" Jan 29 10:58:53 crc kubenswrapper[4852]: I0129 10:58:53.332282 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:53 crc kubenswrapper[4852]: I0129 10:58:53.332357 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:53 crc kubenswrapper[4852]: I0129 10:58:53.373753 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:53 crc kubenswrapper[4852]: I0129 10:58:53.540204 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:58:53 crc kubenswrapper[4852]: I0129 10:58:53.610314 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-86gx6"] Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.183699 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pz8mg" Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.372630 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-lg5cr" Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.398523 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-7wjg2" Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.476111 4852 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-db6p9" Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.621943 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-vrw88" Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.649107 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-bdlbj" Jan 29 10:58:54 crc kubenswrapper[4852]: I0129 10:58:54.874245 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-5cp9z" Jan 29 10:58:55 crc kubenswrapper[4852]: I0129 10:58:55.502392 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-86gx6" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="registry-server" containerID="cri-o://1ec645c568acb37b86cb656eea18ddea47a0941ac83ba25a0a7aed2529ff2ace" gracePeriod=2 Jan 29 10:58:56 crc kubenswrapper[4852]: I0129 10:58:56.272029 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-9c8t7" Jan 29 10:58:56 crc kubenswrapper[4852]: I0129 10:58:56.510323 4852 generic.go:334] "Generic (PLEG): container finished" podID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerID="1ec645c568acb37b86cb656eea18ddea47a0941ac83ba25a0a7aed2529ff2ace" exitCode=0 Jan 29 10:58:56 crc kubenswrapper[4852]: I0129 10:58:56.510383 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerDied","Data":"1ec645c568acb37b86cb656eea18ddea47a0941ac83ba25a0a7aed2529ff2ace"} Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.196196 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.269443 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-utilities\") pod \"77d39ece-605f-452a-adee-ef91bfcbc5c3\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.269513 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-catalog-content\") pod \"77d39ece-605f-452a-adee-ef91bfcbc5c3\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.269647 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d27zx\" (UniqueName: \"kubernetes.io/projected/77d39ece-605f-452a-adee-ef91bfcbc5c3-kube-api-access-d27zx\") pod \"77d39ece-605f-452a-adee-ef91bfcbc5c3\" (UID: \"77d39ece-605f-452a-adee-ef91bfcbc5c3\") " Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.270406 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-utilities" (OuterVolumeSpecName: "utilities") pod "77d39ece-605f-452a-adee-ef91bfcbc5c3" (UID: "77d39ece-605f-452a-adee-ef91bfcbc5c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.274248 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d39ece-605f-452a-adee-ef91bfcbc5c3-kube-api-access-d27zx" (OuterVolumeSpecName: "kube-api-access-d27zx") pod "77d39ece-605f-452a-adee-ef91bfcbc5c3" (UID: "77d39ece-605f-452a-adee-ef91bfcbc5c3"). InnerVolumeSpecName "kube-api-access-d27zx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.318459 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77d39ece-605f-452a-adee-ef91bfcbc5c3" (UID: "77d39ece-605f-452a-adee-ef91bfcbc5c3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.371103 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.371130 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77d39ece-605f-452a-adee-ef91bfcbc5c3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.371141 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d27zx\" (UniqueName: \"kubernetes.io/projected/77d39ece-605f-452a-adee-ef91bfcbc5c3-kube-api-access-d27zx\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.545901 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-86gx6" event={"ID":"77d39ece-605f-452a-adee-ef91bfcbc5c3","Type":"ContainerDied","Data":"b6f9b0631b62869ee0a9870fcd0aef9dfeb888036f1176cc61f6c3feaae62e01"} Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.545952 4852 scope.go:117] "RemoveContainer" containerID="1ec645c568acb37b86cb656eea18ddea47a0941ac83ba25a0a7aed2529ff2ace" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.545967 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-86gx6" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.567871 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-86gx6"] Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.572544 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-86gx6"] Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.573223 4852 scope.go:117] "RemoveContainer" containerID="75fb89f63a7f64b83160e5b96c1c9b7281dab57b0d4ce290a0d72770b36776c8" Jan 29 10:59:01 crc kubenswrapper[4852]: I0129 10:59:01.590357 4852 scope.go:117] "RemoveContainer" containerID="2456b48d7f3bbe53f2a0d9868f8d27927f371238ceb0a8bcd7ec55245fcd6ad0" Jan 29 10:59:03 crc kubenswrapper[4852]: I0129 10:59:03.473249 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" path="/var/lib/kubelet/pods/77d39ece-605f-452a-adee-ef91bfcbc5c3/volumes" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.263123 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-dnfjt"] Jan 29 10:59:12 crc kubenswrapper[4852]: E0129 10:59:12.264183 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="extract-utilities" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.264201 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="extract-utilities" Jan 29 10:59:12 crc kubenswrapper[4852]: E0129 10:59:12.264216 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="extract-content" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.264244 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="extract-content" Jan 29 10:59:12 crc kubenswrapper[4852]: E0129 10:59:12.264266 4852 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="registry-server" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.264274 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="registry-server" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.264488 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d39ece-605f-452a-adee-ef91bfcbc5c3" containerName="registry-server" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.266498 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.269102 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.273392 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-s27rk" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.273470 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.273639 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.276394 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-dnfjt"] Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.315295 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bnwcr"] Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.316422 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.318807 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbxw4\" (UniqueName: \"kubernetes.io/projected/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-kube-api-access-gbxw4\") pod \"dnsmasq-dns-675f4bcbfc-dnfjt\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.318918 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-config\") pod \"dnsmasq-dns-675f4bcbfc-dnfjt\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.322106 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.358549 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bnwcr"] Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.419796 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.419863 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-config\") pod \"dnsmasq-dns-675f4bcbfc-dnfjt\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.419917 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fkxf\" (UniqueName: \"kubernetes.io/projected/b7348c80-c30a-4930-af34-f81e82629e7a-kube-api-access-2fkxf\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.419984 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbxw4\" (UniqueName: \"kubernetes.io/projected/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-kube-api-access-gbxw4\") pod \"dnsmasq-dns-675f4bcbfc-dnfjt\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.420048 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-config\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.421084 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-config\") pod \"dnsmasq-dns-675f4bcbfc-dnfjt\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 
10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.472354 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbxw4\" (UniqueName: \"kubernetes.io/projected/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-kube-api-access-gbxw4\") pod \"dnsmasq-dns-675f4bcbfc-dnfjt\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.521332 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fkxf\" (UniqueName: \"kubernetes.io/projected/b7348c80-c30a-4930-af34-f81e82629e7a-kube-api-access-2fkxf\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.521454 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-config\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.521497 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.522196 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.522767 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-config\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.538280 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fkxf\" (UniqueName: \"kubernetes.io/projected/b7348c80-c30a-4930-af34-f81e82629e7a-kube-api-access-2fkxf\") pod \"dnsmasq-dns-78dd6ddcc-bnwcr\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.590723 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:12 crc kubenswrapper[4852]: I0129 10:59:12.637489 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:13 crc kubenswrapper[4852]: I0129 10:59:13.045320 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bnwcr"] Jan 29 10:59:13 crc kubenswrapper[4852]: W0129 10:59:13.105315 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9bc7fcc4_d5c9_4094_99bf_176b499b62b0.slice/crio-ee6062496a10b7f346a79be599c367335e58016a58ea6552f77527ab8f0f9c4f WatchSource:0}: Error finding container ee6062496a10b7f346a79be599c367335e58016a58ea6552f77527ab8f0f9c4f: Status 404 returned error can't find the container with id ee6062496a10b7f346a79be599c367335e58016a58ea6552f77527ab8f0f9c4f Jan 29 10:59:13 crc kubenswrapper[4852]: I0129 10:59:13.111856 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-dnfjt"] Jan 29 10:59:13 crc kubenswrapper[4852]: I0129 10:59:13.637046 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" event={"ID":"9bc7fcc4-d5c9-4094-99bf-176b499b62b0","Type":"ContainerStarted","Data":"ee6062496a10b7f346a79be599c367335e58016a58ea6552f77527ab8f0f9c4f"} Jan 29 10:59:13 crc kubenswrapper[4852]: I0129 10:59:13.639029 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" event={"ID":"b7348c80-c30a-4930-af34-f81e82629e7a","Type":"ContainerStarted","Data":"b7cbb2780e71676e676f9c54f72d492af9db537591120976fcbbd2e55a79ffc7"} Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.143418 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-dnfjt"] Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.166128 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-q477s"] Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.167491 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.173287 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-q477s"] Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.364363 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-dns-svc\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.364429 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w62f\" (UniqueName: \"kubernetes.io/projected/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-kube-api-access-6w62f\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.364466 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-config\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.426406 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bnwcr"] Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.467308 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-dns-svc\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.466302 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-dns-svc\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.467484 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w62f\" (UniqueName: \"kubernetes.io/projected/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-kube-api-access-6w62f\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.467561 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-config\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.469132 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-config\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.484983 
4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-gpjt4"] Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.486456 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.489430 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-gpjt4"] Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.496472 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w62f\" (UniqueName: \"kubernetes.io/projected/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-kube-api-access-6w62f\") pod \"dnsmasq-dns-666b6646f7-q477s\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.681458 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.681545 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gzfx\" (UniqueName: \"kubernetes.io/projected/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-kube-api-access-5gzfx\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.681643 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-config\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.782231 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-config\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.782309 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.782325 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gzfx\" (UniqueName: \"kubernetes.io/projected/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-kube-api-access-5gzfx\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.783230 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-config\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " 
pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.783357 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.794009 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.801834 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gzfx\" (UniqueName: \"kubernetes.io/projected/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-kube-api-access-5gzfx\") pod \"dnsmasq-dns-57d769cc4f-gpjt4\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:15 crc kubenswrapper[4852]: I0129 10:59:15.832823 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.215278 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-gpjt4"] Jan 29 10:59:16 crc kubenswrapper[4852]: W0129 10:59:16.219713 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8bf3dbd_226b_4092_b1b0_29dc8696c0f0.slice/crio-bb283acbebded98ba563c94291b61059a577d576befd8eb0e519379f866ea8e9 WatchSource:0}: Error finding container bb283acbebded98ba563c94291b61059a577d576befd8eb0e519379f866ea8e9: Status 404 returned error can't find the container with id bb283acbebded98ba563c94291b61059a577d576befd8eb0e519379f866ea8e9 Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.309303 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.311025 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.314334 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.314534 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.314746 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-jxzhp" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.317345 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.317758 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.322272 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.337216 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.342550 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.345119 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-q477s"] Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512183 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512233 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ab8189f-e95a-47b5-a130-5404901974e2-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512250 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512286 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512308 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512383 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjnnr\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-kube-api-access-gjnnr\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512399 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512425 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512467 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512481 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.512510 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ab8189f-e95a-47b5-a130-5404901974e2-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613183 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613262 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613289 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613327 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/1ab8189f-e95a-47b5-a130-5404901974e2-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613370 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613395 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613415 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ab8189f-e95a-47b5-a130-5404901974e2-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613455 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613481 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613535 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjnnr\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-kube-api-access-gjnnr\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613558 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.613691 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.616822 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 
10:59:16.617479 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.620708 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.627942 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.629771 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ab8189f-e95a-47b5-a130-5404901974e2-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.631725 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.633101 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.636327 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.636603 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.636773 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.637061 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.637102 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-vl5b4" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.637226 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.637321 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.643862 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.650329 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.651707 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.652089 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjnnr\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-kube-api-access-gjnnr\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.652408 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.654816 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.657184 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ab8189f-e95a-47b5-a130-5404901974e2-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " pod="openstack/rabbitmq-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.667918 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" event={"ID":"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0","Type":"ContainerStarted","Data":"bb283acbebded98ba563c94291b61059a577d576befd8eb0e519379f866ea8e9"} Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.818977 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819052 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f84fb26d-e835-4d75-95d5-695b6e033bb7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819087 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819136 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819349 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819376 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819397 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4czr\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-kube-api-access-r4czr\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819423 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f84fb26d-e835-4d75-95d5-695b6e033bb7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819463 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819505 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.819533 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921521 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921569 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921661 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f84fb26d-e835-4d75-95d5-695b6e033bb7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921701 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921734 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921754 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921775 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921793 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4czr\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-kube-api-access-r4czr\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921810 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f84fb26d-e835-4d75-95d5-695b6e033bb7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.921838 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc 
kubenswrapper[4852]: I0129 10:59:16.921866 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.922510 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.923399 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.924179 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.925069 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.926037 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f84fb26d-e835-4d75-95d5-695b6e033bb7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.926642 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.927283 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.943360 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f84fb26d-e835-4d75-95d5-695b6e033bb7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.948514 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.949378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4czr\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-kube-api-access-r4czr\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.961120 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:16 crc kubenswrapper[4852]: I0129 10:59:16.969305 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.042008 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.927680 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.928818 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.930554 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.931595 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-q8bpf" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.932204 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.933170 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.937808 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 10:59:17 crc kubenswrapper[4852]: I0129 10:59:17.942759 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.045905 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046215 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-default\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046331 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-scqnv\" (UniqueName: \"kubernetes.io/projected/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kube-api-access-scqnv\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046436 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kolla-config\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046514 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-operator-scripts\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046600 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046671 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.046740 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-generated\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148088 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scqnv\" (UniqueName: \"kubernetes.io/projected/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kube-api-access-scqnv\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148431 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kolla-config\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148476 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-operator-scripts\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148505 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148529 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148552 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-generated\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148594 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.148686 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-default\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.149759 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-default\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.150004 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.153420 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.153783 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-generated\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.159350 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-operator-scripts\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc 
kubenswrapper[4852]: I0129 10:59:18.161251 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kolla-config\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.169193 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.185089 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scqnv\" (UniqueName: \"kubernetes.io/projected/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kube-api-access-scqnv\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.213375 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " pod="openstack/openstack-galera-0" Jan 29 10:59:18 crc kubenswrapper[4852]: I0129 10:59:18.264927 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.241464 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.245931 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.247626 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-7l7vf" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.249008 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.250840 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.251181 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.270037 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371323 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371398 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371612 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371710 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371752 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nkv9\" (UniqueName: \"kubernetes.io/projected/e5832629-fcd6-441c-a349-f771c099f7b4-kube-api-access-6nkv9\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371806 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.371901 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473038 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473084 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473103 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473132 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473155 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473173 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nkv9\" (UniqueName: \"kubernetes.io/projected/e5832629-fcd6-441c-a349-f771c099f7b4-kube-api-access-6nkv9\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473192 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.473220 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-combined-ca-bundle\") 
pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.474018 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.474320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.474611 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.474654 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.475347 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.481503 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.482234 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.493085 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nkv9\" (UniqueName: \"kubernetes.io/projected/e5832629-fcd6-441c-a349-f771c099f7b4-kube-api-access-6nkv9\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.506562 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc 
kubenswrapper[4852]: I0129 10:59:19.561382 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.562272 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.564140 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-4x9sf" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.564725 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.565351 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.576722 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.582867 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.675979 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.676029 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kolla-config\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.676076 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-config-data\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.676097 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.676124 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srnth\" (UniqueName: \"kubernetes.io/projected/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kube-api-access-srnth\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.778087 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-config-data\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.778131 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.778177 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srnth\" (UniqueName: \"kubernetes.io/projected/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kube-api-access-srnth\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.778256 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.778276 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kolla-config\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.779072 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kolla-config\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.779560 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-config-data\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.789445 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.794382 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.797923 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srnth\" (UniqueName: \"kubernetes.io/projected/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kube-api-access-srnth\") pod \"memcached-0\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " pod="openstack/memcached-0" Jan 29 10:59:19 crc kubenswrapper[4852]: I0129 10:59:19.888012 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 29 10:59:20 crc kubenswrapper[4852]: W0129 10:59:20.346631 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ec82d6d_0193_45e6_8e0a_7cc3b18715a2.slice/crio-f5770bd1d2c6e8b22502896e8333cadbe13987e0002804b7fe6b3592eead0ffc WatchSource:0}: Error finding container f5770bd1d2c6e8b22502896e8333cadbe13987e0002804b7fe6b3592eead0ffc: Status 404 returned error can't find the container with id f5770bd1d2c6e8b22502896e8333cadbe13987e0002804b7fe6b3592eead0ffc Jan 29 10:59:20 crc kubenswrapper[4852]: I0129 10:59:20.695897 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-q477s" event={"ID":"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2","Type":"ContainerStarted","Data":"f5770bd1d2c6e8b22502896e8333cadbe13987e0002804b7fe6b3592eead0ffc"} Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.299720 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.301406 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.303784 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-48k26" Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.308817 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.400573 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7bk8\" (UniqueName: \"kubernetes.io/projected/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7-kube-api-access-s7bk8\") pod \"kube-state-metrics-0\" (UID: \"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7\") " pod="openstack/kube-state-metrics-0" Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.501650 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7bk8\" (UniqueName: \"kubernetes.io/projected/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7-kube-api-access-s7bk8\") pod \"kube-state-metrics-0\" (UID: \"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7\") " pod="openstack/kube-state-metrics-0" Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.519412 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7bk8\" (UniqueName: \"kubernetes.io/projected/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7-kube-api-access-s7bk8\") pod \"kube-state-metrics-0\" (UID: \"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7\") " pod="openstack/kube-state-metrics-0" Jan 29 10:59:21 crc kubenswrapper[4852]: I0129 10:59:21.620296 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.951668 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-67sl6"] Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.953349 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-67sl6" Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.955982 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.956270 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.956421 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-rvbhb" Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.958328 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-k8pcs"] Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.959872 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:24 crc kubenswrapper[4852]: I0129 10:59:24.988807 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-k8pcs"] Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.019234 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67sl6"] Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067124 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-etc-ovs\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067196 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-combined-ca-bundle\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067283 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-run\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067384 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ceb5f935-ccb2-4449-964f-b48d616eefea-scripts\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067447 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-lib\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067480 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-ovn-controller-tls-certs\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " 
pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067528 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66g9p\" (UniqueName: \"kubernetes.io/projected/c5bb0b97-62ba-4918-9cf8-b8659b028571-kube-api-access-66g9p\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067644 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run-ovn\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067683 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-log\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067706 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-log-ovn\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067733 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5bb0b97-62ba-4918-9cf8-b8659b028571-scripts\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067764 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.067921 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rpvj\" (UniqueName: \"kubernetes.io/projected/ceb5f935-ccb2-4449-964f-b48d616eefea-kube-api-access-5rpvj\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169430 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ceb5f935-ccb2-4449-964f-b48d616eefea-scripts\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169543 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-lib\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 
10:59:25.169618 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-ovn-controller-tls-certs\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169688 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66g9p\" (UniqueName: \"kubernetes.io/projected/c5bb0b97-62ba-4918-9cf8-b8659b028571-kube-api-access-66g9p\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169793 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run-ovn\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169832 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-log\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169862 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-log-ovn\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169898 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5bb0b97-62ba-4918-9cf8-b8659b028571-scripts\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169933 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.169975 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpvj\" (UniqueName: \"kubernetes.io/projected/ceb5f935-ccb2-4449-964f-b48d616eefea-kube-api-access-5rpvj\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.170014 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-etc-ovs\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.170045 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-combined-ca-bundle\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.170076 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-run\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.170917 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-run\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.173898 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-log-ovn\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.174082 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-lib\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.174626 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ceb5f935-ccb2-4449-964f-b48d616eefea-scripts\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.175290 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-etc-ovs\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.175342 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.175631 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-log\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.175639 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run-ovn\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.178082 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5bb0b97-62ba-4918-9cf8-b8659b028571-scripts\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.180026 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-ovn-controller-tls-certs\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.180123 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-combined-ca-bundle\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.191697 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rpvj\" (UniqueName: \"kubernetes.io/projected/ceb5f935-ccb2-4449-964f-b48d616eefea-kube-api-access-5rpvj\") pod \"ovn-controller-67sl6\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.205058 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66g9p\" (UniqueName: \"kubernetes.io/projected/c5bb0b97-62ba-4918-9cf8-b8659b028571-kube-api-access-66g9p\") pod \"ovn-controller-ovs-k8pcs\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.275824 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67sl6" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.298660 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.562286 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.563491 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.565362 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.571405 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.571458 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.572898 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.574109 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-hxbf7" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.575639 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.679790 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.679885 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.679911 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.680392 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.680656 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c44r\" (UniqueName: \"kubernetes.io/projected/1be50193-e246-46b3-bc61-974f6b01b6e7-kube-api-access-2c44r\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.680701 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.680886 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.681072 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-config\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783015 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783063 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783096 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783132 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c44r\" (UniqueName: \"kubernetes.io/projected/1be50193-e246-46b3-bc61-974f6b01b6e7-kube-api-access-2c44r\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783158 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783217 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783272 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-config\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.783317 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 
10:59:25.783816 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.784180 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.784454 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.785074 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-config\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.788190 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.788508 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.790061 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.809311 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c44r\" (UniqueName: \"kubernetes.io/projected/1be50193-e246-46b3-bc61-974f6b01b6e7-kube-api-access-2c44r\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.809987 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:25 crc kubenswrapper[4852]: I0129 10:59:25.912753 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.104316 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.106312 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.109319 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-zc6x4" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.110468 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.114139 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.115123 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.123466 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.224410 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.224465 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.225248 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-config\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.225325 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.225434 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smldg\" (UniqueName: \"kubernetes.io/projected/b2c66fcd-07fb-42ea-8176-77a4627b3886-kube-api-access-smldg\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.225613 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 
10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.225640 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.227061 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.328870 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-config\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.328922 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.328984 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smldg\" (UniqueName: \"kubernetes.io/projected/b2c66fcd-07fb-42ea-8176-77a4627b3886-kube-api-access-smldg\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329008 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329027 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329091 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329125 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329143 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdb-rundir\") pod 
\"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329485 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329749 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.329987 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-config\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.330876 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.335397 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.340298 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.344169 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.359800 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smldg\" (UniqueName: \"kubernetes.io/projected/b2c66fcd-07fb-42ea-8176-77a4627b3886-kube-api-access-smldg\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.360742 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:28 crc kubenswrapper[4852]: I0129 10:59:28.470134 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:30 crc kubenswrapper[4852]: E0129 10:59:30.262267 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 29 10:59:30 crc kubenswrapper[4852]: E0129 10:59:30.262974 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2fkxf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-bnwcr_openstack(b7348c80-c30a-4930-af34-f81e82629e7a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:59:30 crc kubenswrapper[4852]: E0129 10:59:30.284681 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" podUID="b7348c80-c30a-4930-af34-f81e82629e7a" Jan 29 10:59:30 crc kubenswrapper[4852]: E0129 10:59:30.473549 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 29 10:59:30 crc kubenswrapper[4852]: E0129 10:59:30.473713 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gbxw4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-dnfjt_openstack(9bc7fcc4-d5c9-4094-99bf-176b499b62b0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 10:59:30 crc kubenswrapper[4852]: E0129 10:59:30.475002 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" podUID="9bc7fcc4-d5c9-4094-99bf-176b499b62b0" Jan 29 10:59:30 crc kubenswrapper[4852]: I0129 10:59:30.782198 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" event={"ID":"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0","Type":"ContainerStarted","Data":"49c522f6394f895c70208779ce476975410c0b3c80178f157f32ccec7fb91457"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.087022 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.088978 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.201361 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.202238 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-dns-svc\") pod \"b7348c80-c30a-4930-af34-f81e82629e7a\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.202387 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-config\") pod \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.202544 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fkxf\" (UniqueName: \"kubernetes.io/projected/b7348c80-c30a-4930-af34-f81e82629e7a-kube-api-access-2fkxf\") pod \"b7348c80-c30a-4930-af34-f81e82629e7a\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.202710 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbxw4\" (UniqueName: \"kubernetes.io/projected/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-kube-api-access-gbxw4\") pod \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\" (UID: \"9bc7fcc4-d5c9-4094-99bf-176b499b62b0\") " Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.202911 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-config\") pod \"b7348c80-c30a-4930-af34-f81e82629e7a\" (UID: \"b7348c80-c30a-4930-af34-f81e82629e7a\") " Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.204034 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-config" (OuterVolumeSpecName: "config") pod "b7348c80-c30a-4930-af34-f81e82629e7a" (UID: "b7348c80-c30a-4930-af34-f81e82629e7a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.204465 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b7348c80-c30a-4930-af34-f81e82629e7a" (UID: "b7348c80-c30a-4930-af34-f81e82629e7a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.205178 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-config" (OuterVolumeSpecName: "config") pod "9bc7fcc4-d5c9-4094-99bf-176b499b62b0" (UID: "9bc7fcc4-d5c9-4094-99bf-176b499b62b0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.212064 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.212265 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-kube-api-access-gbxw4" (OuterVolumeSpecName: "kube-api-access-gbxw4") pod "9bc7fcc4-d5c9-4094-99bf-176b499b62b0" (UID: "9bc7fcc4-d5c9-4094-99bf-176b499b62b0"). InnerVolumeSpecName "kube-api-access-gbxw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.212620 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7348c80-c30a-4930-af34-f81e82629e7a-kube-api-access-2fkxf" (OuterVolumeSpecName: "kube-api-access-2fkxf") pod "b7348c80-c30a-4930-af34-f81e82629e7a" (UID: "b7348c80-c30a-4930-af34-f81e82629e7a"). InnerVolumeSpecName "kube-api-access-2fkxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.254193 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.271855 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67sl6"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.292309 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.298563 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.304228 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.304825 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.304845 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b7348c80-c30a-4930-af34-f81e82629e7a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.304854 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.304865 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fkxf\" (UniqueName: \"kubernetes.io/projected/b7348c80-c30a-4930-af34-f81e82629e7a-kube-api-access-2fkxf\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.304876 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbxw4\" (UniqueName: \"kubernetes.io/projected/9bc7fcc4-d5c9-4094-99bf-176b499b62b0-kube-api-access-gbxw4\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:31 crc kubenswrapper[4852]: W0129 10:59:31.372718 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2c66fcd_07fb_42ea_8176_77a4627b3886.slice/crio-10ea9ba2fe7a820363d41fc44e51f154ca9bbac1ccc1cd29f6b42f66bf4ce78e 
WatchSource:0}: Error finding container 10ea9ba2fe7a820363d41fc44e51f154ca9bbac1ccc1cd29f6b42f66bf4ce78e: Status 404 returned error can't find the container with id 10ea9ba2fe7a820363d41fc44e51f154ca9bbac1ccc1cd29f6b42f66bf4ce78e Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.374516 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.448477 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 10:59:31 crc kubenswrapper[4852]: W0129 10:59:31.553689 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5bb0b97_62ba_4918_9cf8_b8659b028571.slice/crio-afd512161435d268479ef8106220a08c99eb5b9d26eff603c82cb8dac6665b1c WatchSource:0}: Error finding container afd512161435d268479ef8106220a08c99eb5b9d26eff603c82cb8dac6665b1c: Status 404 returned error can't find the container with id afd512161435d268479ef8106220a08c99eb5b9d26eff603c82cb8dac6665b1c Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.561139 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-k8pcs"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.791294 4852 generic.go:334] "Generic (PLEG): container finished" podID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerID="49c522f6394f895c70208779ce476975410c0b3c80178f157f32ccec7fb91457" exitCode=0 Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.791370 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" event={"ID":"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0","Type":"ContainerDied","Data":"49c522f6394f895c70208779ce476975410c0b3c80178f157f32ccec7fb91457"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.794540 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1be50193-e246-46b3-bc61-974f6b01b6e7","Type":"ContainerStarted","Data":"ed5f205a6c1838d88c524adbcf1bc4a5c44cf83cda919ceb06f9b0947f601d7b"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.796113 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b2c66fcd-07fb-42ea-8176-77a4627b3886","Type":"ContainerStarted","Data":"10ea9ba2fe7a820363d41fc44e51f154ca9bbac1ccc1cd29f6b42f66bf4ce78e"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.798398 4852 generic.go:334] "Generic (PLEG): container finished" podID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerID="bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28" exitCode=0 Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.798457 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-q477s" event={"ID":"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2","Type":"ContainerDied","Data":"bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.800502 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ce70759d-e206-41b9-b8d2-52a8ca74f67c","Type":"ContainerStarted","Data":"9232d7f44a8ae6ca9010010b4039552a2e20aaa160b8e92f4137b27b8c291272"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.801908 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6" 
event={"ID":"ceb5f935-ccb2-4449-964f-b48d616eefea","Type":"ContainerStarted","Data":"db1b6b8ae2034c02ad97d32e7e15ab1eff623ed779f456aecbb254e6a20c01e4"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.805401 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.805401 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-bnwcr" event={"ID":"b7348c80-c30a-4930-af34-f81e82629e7a","Type":"ContainerDied","Data":"b7cbb2780e71676e676f9c54f72d492af9db537591120976fcbbd2e55a79ffc7"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.813882 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1ab8189f-e95a-47b5-a130-5404901974e2","Type":"ContainerStarted","Data":"ef9d240ed16cad0fc35c05ef8c5c8ae493cfaf3142fa7bc88122a228f78918b9"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.815393 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"41ebdede-a3fa-41b6-9c03-dffdba9a112b","Type":"ContainerStarted","Data":"c8f2afa6863e6acca40aecd7202098e0c30981e2bce86b00a022ae6d2e88ad23"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.816610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerStarted","Data":"afd512161435d268479ef8106220a08c99eb5b9d26eff603c82cb8dac6665b1c"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.817959 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" event={"ID":"9bc7fcc4-d5c9-4094-99bf-176b499b62b0","Type":"ContainerDied","Data":"ee6062496a10b7f346a79be599c367335e58016a58ea6552f77527ab8f0f9c4f"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.818032 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-dnfjt" Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.819845 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5832629-fcd6-441c-a349-f771c099f7b4","Type":"ContainerStarted","Data":"d9f64c02452c95ac177c99779642663791eff3e1562b0be3661fa1278f0f2ef6"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.821796 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7","Type":"ContainerStarted","Data":"087c38120c7e4d21ee12c1fa6072526ed743bfc39a48eb861326e91e3a236576"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.823499 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f84fb26d-e835-4d75-95d5-695b6e033bb7","Type":"ContainerStarted","Data":"748b797a12353bbc561a885499225b3ebcdac7c649a8a1b5f8692ebf0da123f5"} Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.865240 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-dnfjt"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.870850 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-dnfjt"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.907154 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bnwcr"] Jan 29 10:59:31 crc kubenswrapper[4852]: I0129 10:59:31.923153 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-bnwcr"] Jan 29 10:59:32 crc kubenswrapper[4852]: I0129 10:59:32.842862 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" event={"ID":"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0","Type":"ContainerStarted","Data":"4e9d844945a3af9d01db6e9f68497c0f269565d408a2695a0a6a424055235533"} Jan 29 10:59:32 crc kubenswrapper[4852]: I0129 10:59:32.843222 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:32 crc kubenswrapper[4852]: I0129 10:59:32.850733 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-q477s" event={"ID":"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2","Type":"ContainerStarted","Data":"3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca"} Jan 29 10:59:32 crc kubenswrapper[4852]: I0129 10:59:32.850958 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:32 crc kubenswrapper[4852]: I0129 10:59:32.901197 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" podStartSLOduration=3.598524035 podStartE2EDuration="17.901150856s" podCreationTimestamp="2026-01-29 10:59:15 +0000 UTC" firstStartedPulling="2026-01-29 10:59:16.223191999 +0000 UTC m=+1053.440523133" lastFinishedPulling="2026-01-29 10:59:30.52581882 +0000 UTC m=+1067.743149954" observedRunningTime="2026-01-29 10:59:32.86446658 +0000 UTC m=+1070.081797724" watchObservedRunningTime="2026-01-29 10:59:32.901150856 +0000 UTC m=+1070.118482010" Jan 29 10:59:32 crc kubenswrapper[4852]: I0129 10:59:32.911458 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-q477s" podStartSLOduration=7.442791759 podStartE2EDuration="17.911438087s" podCreationTimestamp="2026-01-29 10:59:15 
+0000 UTC" firstStartedPulling="2026-01-29 10:59:20.348420625 +0000 UTC m=+1057.565751759" lastFinishedPulling="2026-01-29 10:59:30.817066953 +0000 UTC m=+1068.034398087" observedRunningTime="2026-01-29 10:59:32.8861833 +0000 UTC m=+1070.103514504" watchObservedRunningTime="2026-01-29 10:59:32.911438087 +0000 UTC m=+1070.128769241" Jan 29 10:59:33 crc kubenswrapper[4852]: I0129 10:59:33.477399 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bc7fcc4-d5c9-4094-99bf-176b499b62b0" path="/var/lib/kubelet/pods/9bc7fcc4-d5c9-4094-99bf-176b499b62b0/volumes" Jan 29 10:59:33 crc kubenswrapper[4852]: I0129 10:59:33.478025 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7348c80-c30a-4930-af34-f81e82629e7a" path="/var/lib/kubelet/pods/b7348c80-c30a-4930-af34-f81e82629e7a/volumes" Jan 29 10:59:40 crc kubenswrapper[4852]: I0129 10:59:40.795882 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:40 crc kubenswrapper[4852]: I0129 10:59:40.834814 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 10:59:40 crc kubenswrapper[4852]: I0129 10:59:40.906845 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-q477s"] Jan 29 10:59:40 crc kubenswrapper[4852]: I0129 10:59:40.944869 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-q477s" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerName="dnsmasq-dns" containerID="cri-o://3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca" gracePeriod=10 Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.524921 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.710672 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w62f\" (UniqueName: \"kubernetes.io/projected/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-kube-api-access-6w62f\") pod \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.710724 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-dns-svc\") pod \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.710750 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-config\") pod \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\" (UID: \"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2\") " Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.749328 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" (UID: "7ec82d6d-0193-45e6-8e0a-7cc3b18715a2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.758654 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-kube-api-access-6w62f" (OuterVolumeSpecName: "kube-api-access-6w62f") pod "7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" (UID: "7ec82d6d-0193-45e6-8e0a-7cc3b18715a2"). InnerVolumeSpecName "kube-api-access-6w62f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.812687 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w62f\" (UniqueName: \"kubernetes.io/projected/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-kube-api-access-6w62f\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.812734 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.908013 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-config" (OuterVolumeSpecName: "config") pod "7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" (UID: "7ec82d6d-0193-45e6-8e0a-7cc3b18715a2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.915717 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.971206 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5832629-fcd6-441c-a349-f771c099f7b4","Type":"ContainerStarted","Data":"1d33f432ea0c514311532d8ed2d373dc60579c4e1795dff17fb46ca3b939ee8c"} Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.980099 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"41ebdede-a3fa-41b6-9c03-dffdba9a112b","Type":"ContainerStarted","Data":"86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572"} Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.988963 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7","Type":"ContainerStarted","Data":"0f3f431c82690a3e3065bdf7cc43eedfed3fad3c3a5ef230080a9b29931b406a"} Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.989606 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.991574 4852 generic.go:334] "Generic (PLEG): container finished" podID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerID="3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca" exitCode=0 Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.991650 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-q477s" event={"ID":"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2","Type":"ContainerDied","Data":"3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca"} Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.991667 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-q477s" 
event={"ID":"7ec82d6d-0193-45e6-8e0a-7cc3b18715a2","Type":"ContainerDied","Data":"f5770bd1d2c6e8b22502896e8333cadbe13987e0002804b7fe6b3592eead0ffc"} Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.991682 4852 scope.go:117] "RemoveContainer" containerID="3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.991773 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-q477s" Jan 29 10:59:41 crc kubenswrapper[4852]: I0129 10:59:41.997069 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerStarted","Data":"0bf6c5dd03fd6115ea46a72029c74c47503ea5da2617ca4acd6c358b19f7581e"} Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.042427 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1be50193-e246-46b3-bc61-974f6b01b6e7","Type":"ContainerStarted","Data":"2efbb95f790b03b0a620fc69f9cb3727ff67bc70ca3bd32306962a43832948f8"} Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.047081 4852 scope.go:117] "RemoveContainer" containerID="bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.058174 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ce70759d-e206-41b9-b8d2-52a8ca74f67c","Type":"ContainerStarted","Data":"444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530"} Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.058222 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.061179 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=11.099927196 podStartE2EDuration="21.061158702s" podCreationTimestamp="2026-01-29 10:59:21 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.285804282 +0000 UTC m=+1068.503135416" lastFinishedPulling="2026-01-29 10:59:41.247035788 +0000 UTC m=+1078.464366922" observedRunningTime="2026-01-29 10:59:42.057004951 +0000 UTC m=+1079.274336085" watchObservedRunningTime="2026-01-29 10:59:42.061158702 +0000 UTC m=+1079.278489836" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.096394 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b2c66fcd-07fb-42ea-8176-77a4627b3886","Type":"ContainerStarted","Data":"5a4569a0c66938bb15ae418dc4474095bccb853bb517e38eb2c06c8c521e60aa"} Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.160483 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=14.130632861 podStartE2EDuration="23.160465508s" podCreationTimestamp="2026-01-29 10:59:19 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.21363312 +0000 UTC m=+1068.430964254" lastFinishedPulling="2026-01-29 10:59:40.243465767 +0000 UTC m=+1077.460796901" observedRunningTime="2026-01-29 10:59:42.157562787 +0000 UTC m=+1079.374893931" watchObservedRunningTime="2026-01-29 10:59:42.160465508 +0000 UTC m=+1079.377796642" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.176649 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-q477s"] Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.185624 4852 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-q477s"] Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.187247 4852 scope.go:117] "RemoveContainer" containerID="3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca" Jan 29 10:59:42 crc kubenswrapper[4852]: E0129 10:59:42.188656 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca\": container with ID starting with 3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca not found: ID does not exist" containerID="3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.188690 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca"} err="failed to get container status \"3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca\": rpc error: code = NotFound desc = could not find container \"3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca\": container with ID starting with 3e559ff17d0e1765f57813299453bc22efa21d760ffa8245174fc63e2148d7ca not found: ID does not exist" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.188712 4852 scope.go:117] "RemoveContainer" containerID="bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28" Jan 29 10:59:42 crc kubenswrapper[4852]: E0129 10:59:42.189119 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28\": container with ID starting with bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28 not found: ID does not exist" containerID="bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28" Jan 29 10:59:42 crc kubenswrapper[4852]: I0129 10:59:42.189144 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28"} err="failed to get container status \"bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28\": rpc error: code = NotFound desc = could not find container \"bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28\": container with ID starting with bdaa1363008434bf98343d98fe2548b657a57acfb1a87990acc8201d435d1f28 not found: ID does not exist" Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.106558 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f84fb26d-e835-4d75-95d5-695b6e033bb7","Type":"ContainerStarted","Data":"91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a"} Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.108857 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6" event={"ID":"ceb5f935-ccb2-4449-964f-b48d616eefea","Type":"ContainerStarted","Data":"825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8"} Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.108985 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-67sl6" Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.110301 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"1ab8189f-e95a-47b5-a130-5404901974e2","Type":"ContainerStarted","Data":"c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28"} Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.112789 4852 generic.go:334] "Generic (PLEG): container finished" podID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerID="0bf6c5dd03fd6115ea46a72029c74c47503ea5da2617ca4acd6c358b19f7581e" exitCode=0 Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.112860 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerDied","Data":"0bf6c5dd03fd6115ea46a72029c74c47503ea5da2617ca4acd6c358b19f7581e"} Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.198013 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-67sl6" podStartSLOduration=9.97113935 podStartE2EDuration="19.197995189s" podCreationTimestamp="2026-01-29 10:59:24 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.292837754 +0000 UTC m=+1068.510168888" lastFinishedPulling="2026-01-29 10:59:40.519693583 +0000 UTC m=+1077.737024727" observedRunningTime="2026-01-29 10:59:43.195039557 +0000 UTC m=+1080.412370691" watchObservedRunningTime="2026-01-29 10:59:43.197995189 +0000 UTC m=+1080.415326323" Jan 29 10:59:43 crc kubenswrapper[4852]: I0129 10:59:43.474259 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" path="/var/lib/kubelet/pods/7ec82d6d-0193-45e6-8e0a-7cc3b18715a2/volumes" Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.123109 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerStarted","Data":"856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1"} Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.123402 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerStarted","Data":"7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f"} Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.123910 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.124074 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.127758 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1be50193-e246-46b3-bc61-974f6b01b6e7","Type":"ContainerStarted","Data":"9caeeb9cc777b568b828a8350ae3044f5968f1db84c2eef74d143f409773f59a"} Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.130342 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b2c66fcd-07fb-42ea-8176-77a4627b3886","Type":"ContainerStarted","Data":"71d22b58d4dc22fc8a888b0e9739ec5f63964d6176341a6a37e66f1cb7ee656c"} Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.148271 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-k8pcs" podStartSLOduration=11.35785857 podStartE2EDuration="20.148249859s" podCreationTimestamp="2026-01-29 10:59:24 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.55551122 +0000 UTC m=+1068.772842344" 
lastFinishedPulling="2026-01-29 10:59:40.345902489 +0000 UTC m=+1077.563233633" observedRunningTime="2026-01-29 10:59:44.141645017 +0000 UTC m=+1081.358976171" watchObservedRunningTime="2026-01-29 10:59:44.148249859 +0000 UTC m=+1081.365580993" Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.164048 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.4205783180000005 podStartE2EDuration="17.164028414s" podCreationTimestamp="2026-01-29 10:59:27 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.374068818 +0000 UTC m=+1068.591399952" lastFinishedPulling="2026-01-29 10:59:43.117518904 +0000 UTC m=+1080.334850048" observedRunningTime="2026-01-29 10:59:44.159578435 +0000 UTC m=+1081.376909589" watchObservedRunningTime="2026-01-29 10:59:44.164028414 +0000 UTC m=+1081.381359548" Jan 29 10:59:44 crc kubenswrapper[4852]: I0129 10:59:44.183421 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=8.528633828 podStartE2EDuration="20.183397427s" podCreationTimestamp="2026-01-29 10:59:24 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.456404169 +0000 UTC m=+1068.673735313" lastFinishedPulling="2026-01-29 10:59:43.111167778 +0000 UTC m=+1080.328498912" observedRunningTime="2026-01-29 10:59:44.177447172 +0000 UTC m=+1081.394778316" watchObservedRunningTime="2026-01-29 10:59:44.183397427 +0000 UTC m=+1081.400728571" Jan 29 10:59:45 crc kubenswrapper[4852]: I0129 10:59:45.914105 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.146063 4852 generic.go:334] "Generic (PLEG): container finished" podID="e5832629-fcd6-441c-a349-f771c099f7b4" containerID="1d33f432ea0c514311532d8ed2d373dc60579c4e1795dff17fb46ca3b939ee8c" exitCode=0 Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.146169 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5832629-fcd6-441c-a349-f771c099f7b4","Type":"ContainerDied","Data":"1d33f432ea0c514311532d8ed2d373dc60579c4e1795dff17fb46ca3b939ee8c"} Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.147567 4852 generic.go:334] "Generic (PLEG): container finished" podID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerID="86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572" exitCode=0 Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.147605 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"41ebdede-a3fa-41b6-9c03-dffdba9a112b","Type":"ContainerDied","Data":"86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572"} Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.470842 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.517900 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.914182 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:46 crc kubenswrapper[4852]: I0129 10:59:46.948068 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.157705 4852 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5832629-fcd6-441c-a349-f771c099f7b4","Type":"ContainerStarted","Data":"10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49"} Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.159486 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"41ebdede-a3fa-41b6-9c03-dffdba9a112b","Type":"ContainerStarted","Data":"1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2"} Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.159938 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.196218 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=20.074180185 podStartE2EDuration="29.196185173s" podCreationTimestamp="2026-01-29 10:59:18 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.222372973 +0000 UTC m=+1068.439704107" lastFinishedPulling="2026-01-29 10:59:40.344377961 +0000 UTC m=+1077.561709095" observedRunningTime="2026-01-29 10:59:47.178446479 +0000 UTC m=+1084.395777623" watchObservedRunningTime="2026-01-29 10:59:47.196185173 +0000 UTC m=+1084.413516307" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.200372 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.206305 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.212045 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=22.101886191 podStartE2EDuration="31.212023089s" podCreationTimestamp="2026-01-29 10:59:16 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.299456696 +0000 UTC m=+1068.516787830" lastFinishedPulling="2026-01-29 10:59:40.409593594 +0000 UTC m=+1077.626924728" observedRunningTime="2026-01-29 10:59:47.206566456 +0000 UTC m=+1084.423897600" watchObservedRunningTime="2026-01-29 10:59:47.212023089 +0000 UTC m=+1084.429354223" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.461434 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-sddb5"] Jan 29 10:59:47 crc kubenswrapper[4852]: E0129 10:59:47.462137 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerName="dnsmasq-dns" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.462153 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerName="dnsmasq-dns" Jan 29 10:59:47 crc kubenswrapper[4852]: E0129 10:59:47.462172 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerName="init" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.462179 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerName="init" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.462326 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ec82d6d-0193-45e6-8e0a-7cc3b18715a2" containerName="dnsmasq-dns" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.463245 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.466785 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.474817 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-sddb5"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.610063 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbsbp\" (UniqueName: \"kubernetes.io/projected/23dd0e69-859b-4242-b74f-c2eac7362a25-kube-api-access-qbsbp\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.610138 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.610400 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-config\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.610501 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.658831 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-rxcgs"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.659991 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.661652 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.668524 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rxcgs"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.712371 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.712449 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbsbp\" (UniqueName: \"kubernetes.io/projected/23dd0e69-859b-4242-b74f-c2eac7362a25-kube-api-access-qbsbp\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.712488 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.712555 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-config\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.713359 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.713385 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-config\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.713737 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.741070 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbsbp\" (UniqueName: \"kubernetes.io/projected/23dd0e69-859b-4242-b74f-c2eac7362a25-kube-api-access-qbsbp\") pod \"dnsmasq-dns-6bc7876d45-sddb5\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.752756 4852 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.754148 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.762422 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.762678 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.762793 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-tn224" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.762939 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.763282 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.782008 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-sddb5"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.782560 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.813762 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d4860e9-e690-409f-bc12-86a1c51e6db1-config\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.813828 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.813882 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-combined-ca-bundle\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.813909 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovs-rundir\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.813934 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovn-rundir\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.813960 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkls2\" (UniqueName: \"kubernetes.io/projected/1d4860e9-e690-409f-bc12-86a1c51e6db1-kube-api-access-hkls2\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.821958 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-z5v7j"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.823305 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.828596 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.837266 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-z5v7j"] Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.915443 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.915713 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwvlb\" (UniqueName: \"kubernetes.io/projected/d96606bd-5459-4af8-8894-59a71f43e365-kube-api-access-xwvlb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.915748 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-dns-svc\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.915872 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.915948 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d4860e9-e690-409f-bc12-86a1c51e6db1-config\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.915981 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916005 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-scripts\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916054 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916139 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916180 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-config\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916210 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzrdq\" (UniqueName: \"kubernetes.io/projected/b99e639e-b687-4552-bfa0-ed4391283aaf-kube-api-access-xzrdq\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916227 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-combined-ca-bundle\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916250 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovs-rundir\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916266 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916290 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916316 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovn-rundir\") pod 
\"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916342 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-config\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916365 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkls2\" (UniqueName: \"kubernetes.io/projected/1d4860e9-e690-409f-bc12-86a1c51e6db1-kube-api-access-hkls2\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916488 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovs-rundir\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916749 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d4860e9-e690-409f-bc12-86a1c51e6db1-config\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.916786 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovn-rundir\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.919484 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-combined-ca-bundle\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.920225 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.933379 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkls2\" (UniqueName: \"kubernetes.io/projected/1d4860e9-e690-409f-bc12-86a1c51e6db1-kube-api-access-hkls2\") pod \"ovn-controller-metrics-rxcgs\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:47 crc kubenswrapper[4852]: I0129 10:59:47.977434 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017437 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017499 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017520 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-scripts\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017545 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017605 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-config\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017638 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzrdq\" (UniqueName: \"kubernetes.io/projected/b99e639e-b687-4552-bfa0-ed4391283aaf-kube-api-access-xzrdq\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017667 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017690 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017717 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-config\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017792 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017814 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwvlb\" (UniqueName: \"kubernetes.io/projected/d96606bd-5459-4af8-8894-59a71f43e365-kube-api-access-xwvlb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.017835 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-dns-svc\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.018681 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-dns-svc\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.019237 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.019313 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.019558 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-config\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.019673 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-config\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.019766 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-scripts\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.020072 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 
10:59:48.026281 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.026464 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.026718 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.035228 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwvlb\" (UniqueName: \"kubernetes.io/projected/d96606bd-5459-4af8-8894-59a71f43e365-kube-api-access-xwvlb\") pod \"dnsmasq-dns-8554648995-z5v7j\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.036891 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzrdq\" (UniqueName: \"kubernetes.io/projected/b99e639e-b687-4552-bfa0-ed4391283aaf-kube-api-access-xzrdq\") pod \"ovn-northd-0\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.112806 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.193345 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.267789 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.267839 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.279182 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-sddb5"] Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.436921 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rxcgs"] Jan 29 10:59:48 crc kubenswrapper[4852]: W0129 10:59:48.446560 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d4860e9_e690_409f_bc12_86a1c51e6db1.slice/crio-32b63bf47d79040f66ea22af0dbffbc79d1030aa465d379aeb26fa0baa77f8df WatchSource:0}: Error finding container 32b63bf47d79040f66ea22af0dbffbc79d1030aa465d379aeb26fa0baa77f8df: Status 404 returned error can't find the container with id 32b63bf47d79040f66ea22af0dbffbc79d1030aa465d379aeb26fa0baa77f8df Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.580539 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 10:59:48 crc kubenswrapper[4852]: W0129 10:59:48.600117 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb99e639e_b687_4552_bfa0_ed4391283aaf.slice/crio-4e70ab5c672e7441c2c7feba35acabc2b55fa9d89e93a178b684ce859c3f6cf3 WatchSource:0}: Error finding container 4e70ab5c672e7441c2c7feba35acabc2b55fa9d89e93a178b684ce859c3f6cf3: Status 404 returned error can't find the container with id 4e70ab5c672e7441c2c7feba35acabc2b55fa9d89e93a178b684ce859c3f6cf3 Jan 29 10:59:48 crc kubenswrapper[4852]: I0129 10:59:48.685161 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-z5v7j"] Jan 29 10:59:48 crc kubenswrapper[4852]: W0129 10:59:48.688909 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd96606bd_5459_4af8_8894_59a71f43e365.slice/crio-e1782d359ca9db097149c1746d788fd6331ef750a9363322517156615c758d93 WatchSource:0}: Error finding container e1782d359ca9db097149c1746d788fd6331ef750a9363322517156615c758d93: Status 404 returned error can't find the container with id e1782d359ca9db097149c1746d788fd6331ef750a9363322517156615c758d93 Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.174509 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b99e639e-b687-4552-bfa0-ed4391283aaf","Type":"ContainerStarted","Data":"4e70ab5c672e7441c2c7feba35acabc2b55fa9d89e93a178b684ce859c3f6cf3"} Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.176065 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" event={"ID":"23dd0e69-859b-4242-b74f-c2eac7362a25","Type":"ContainerStarted","Data":"6efb5b0282255358c0ca89b7552ef9d46e869d7c5fa76801b6c964fbf01306ca"} Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.177288 4852 generic.go:334] "Generic (PLEG): container finished" podID="d96606bd-5459-4af8-8894-59a71f43e365" containerID="d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023" exitCode=0 Jan 29 
10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.177339 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-z5v7j" event={"ID":"d96606bd-5459-4af8-8894-59a71f43e365","Type":"ContainerDied","Data":"d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023"} Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.177354 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-z5v7j" event={"ID":"d96606bd-5459-4af8-8894-59a71f43e365","Type":"ContainerStarted","Data":"e1782d359ca9db097149c1746d788fd6331ef750a9363322517156615c758d93"} Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.179321 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rxcgs" event={"ID":"1d4860e9-e690-409f-bc12-86a1c51e6db1","Type":"ContainerStarted","Data":"655c5a497b7193f8e8b2150d018d78c5d4b8e8338aa5d573bffd5c89cd5db084"} Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.179346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rxcgs" event={"ID":"1d4860e9-e690-409f-bc12-86a1c51e6db1","Type":"ContainerStarted","Data":"32b63bf47d79040f66ea22af0dbffbc79d1030aa465d379aeb26fa0baa77f8df"} Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.230453 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-rxcgs" podStartSLOduration=2.230429027 podStartE2EDuration="2.230429027s" podCreationTimestamp="2026-01-29 10:59:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:59:49.224516753 +0000 UTC m=+1086.441847927" watchObservedRunningTime="2026-01-29 10:59:49.230429027 +0000 UTC m=+1086.447760161" Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.584001 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.584540 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:49 crc kubenswrapper[4852]: I0129 10:59:49.889431 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.190748 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b99e639e-b687-4552-bfa0-ed4391283aaf","Type":"ContainerStarted","Data":"9b8c01c6c407ae7627a56b9f7843d60ca4bb8c2d21417edb2f07b4193f385d24"} Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.195710 4852 generic.go:334] "Generic (PLEG): container finished" podID="23dd0e69-859b-4242-b74f-c2eac7362a25" containerID="72758c3119f3ecc0f67202197a6cbbe37bc2ee2693a317d1e28eefff3ea309b0" exitCode=0 Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.195775 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" event={"ID":"23dd0e69-859b-4242-b74f-c2eac7362a25","Type":"ContainerDied","Data":"72758c3119f3ecc0f67202197a6cbbe37bc2ee2693a317d1e28eefff3ea309b0"} Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.200519 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-z5v7j" event={"ID":"d96606bd-5459-4af8-8894-59a71f43e365","Type":"ContainerStarted","Data":"4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0"} Jan 29 10:59:50 crc 
kubenswrapper[4852]: I0129 10:59:50.201208 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.242124 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-z5v7j" podStartSLOduration=3.242104867 podStartE2EDuration="3.242104867s" podCreationTimestamp="2026-01-29 10:59:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:59:50.232721547 +0000 UTC m=+1087.450052701" watchObservedRunningTime="2026-01-29 10:59:50.242104867 +0000 UTC m=+1087.459436001" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.528132 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.562131 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbsbp\" (UniqueName: \"kubernetes.io/projected/23dd0e69-859b-4242-b74f-c2eac7362a25-kube-api-access-qbsbp\") pod \"23dd0e69-859b-4242-b74f-c2eac7362a25\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.562257 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-config\") pod \"23dd0e69-859b-4242-b74f-c2eac7362a25\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.562378 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-dns-svc\") pod \"23dd0e69-859b-4242-b74f-c2eac7362a25\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.562414 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-ovsdbserver-sb\") pod \"23dd0e69-859b-4242-b74f-c2eac7362a25\" (UID: \"23dd0e69-859b-4242-b74f-c2eac7362a25\") " Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.566727 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23dd0e69-859b-4242-b74f-c2eac7362a25-kube-api-access-qbsbp" (OuterVolumeSpecName: "kube-api-access-qbsbp") pod "23dd0e69-859b-4242-b74f-c2eac7362a25" (UID: "23dd0e69-859b-4242-b74f-c2eac7362a25"). InnerVolumeSpecName "kube-api-access-qbsbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.580669 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-config" (OuterVolumeSpecName: "config") pod "23dd0e69-859b-4242-b74f-c2eac7362a25" (UID: "23dd0e69-859b-4242-b74f-c2eac7362a25"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.587049 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "23dd0e69-859b-4242-b74f-c2eac7362a25" (UID: "23dd0e69-859b-4242-b74f-c2eac7362a25"). 
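Note on the record format: every entry in this capture follows the same klog-style header, a severity letter plus an MMDD date (I0129), a wall-clock timestamp, the kubelet PID (4852), the emitting source file and line, then a quoted message followed by key="value" fields such as pod=, probe= and event=. The sketch below is a hypothetical helper for pulling those pieces out of one record; it is not kubelet code, and the field layout is assumed only from the lines shown in this log.

package main

import (
	"fmt"
	"regexp"
)

// record matches the header layout observed above:
//   <severity><MMDD> <hh:mm:ss.micros> <pid> <file:line>] "<message>" <key="value" ...>
var record = regexp.MustCompile(
	`^([IWE])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+)\s+(\S+:\d+)\]\s+"([^"]*)"(.*)$`)

// podField pulls the pod="<namespace>/<name>" attribute out of the trailing fields.
var podField = regexp.MustCompile(`pod="([^"]+)"`)

func main() {
	// A record copied verbatim from the log above.
	line := `I0129 10:59:48.267839 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"`

	m := record.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("not a klog-style record")
		return
	}
	fmt.Printf("severity=%s time=%s source=%s msg=%q\n", m[1], m[3], m[5], m[6])
	if p := podField.FindStringSubmatch(m[7]); p != nil {
		fmt.Printf("pod=%s\n", p[1])
	}
}

Against the probe record for openstack-galera-0 it prints the severity, timestamp, source location, message and pod name, which is usually enough to slice a capture like this one per pod.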
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.588042 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "23dd0e69-859b-4242-b74f-c2eac7362a25" (UID: "23dd0e69-859b-4242-b74f-c2eac7362a25"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.664288 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbsbp\" (UniqueName: \"kubernetes.io/projected/23dd0e69-859b-4242-b74f-c2eac7362a25-kube-api-access-qbsbp\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.664334 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.664347 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:50 crc kubenswrapper[4852]: I0129 10:59:50.664358 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23dd0e69-859b-4242-b74f-c2eac7362a25-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.210070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" event={"ID":"23dd0e69-859b-4242-b74f-c2eac7362a25","Type":"ContainerDied","Data":"6efb5b0282255358c0ca89b7552ef9d46e869d7c5fa76801b6c964fbf01306ca"} Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.210094 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-sddb5" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.210404 4852 scope.go:117] "RemoveContainer" containerID="72758c3119f3ecc0f67202197a6cbbe37bc2ee2693a317d1e28eefff3ea309b0" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.214031 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b99e639e-b687-4552-bfa0-ed4391283aaf","Type":"ContainerStarted","Data":"a923b5cbfe6f467279e0395781d92a15fa09516398b7b3fef3ba567acaf19c1b"} Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.214082 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.293088 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.896138556 podStartE2EDuration="4.293065066s" podCreationTimestamp="2026-01-29 10:59:47 +0000 UTC" firstStartedPulling="2026-01-29 10:59:48.602878869 +0000 UTC m=+1085.820210003" lastFinishedPulling="2026-01-29 10:59:49.999805379 +0000 UTC m=+1087.217136513" observedRunningTime="2026-01-29 10:59:51.239827626 +0000 UTC m=+1088.457158780" watchObservedRunningTime="2026-01-29 10:59:51.293065066 +0000 UTC m=+1088.510396200" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.303027 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-sddb5"] Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.308240 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-sddb5"] Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.471956 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23dd0e69-859b-4242-b74f-c2eac7362a25" path="/var/lib/kubelet/pods/23dd0e69-859b-4242-b74f-c2eac7362a25/volumes" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.630401 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.704298 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-z5v7j"] Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.744352 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-stv2s"] Jan 29 10:59:51 crc kubenswrapper[4852]: E0129 10:59:51.744788 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23dd0e69-859b-4242-b74f-c2eac7362a25" containerName="init" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.749723 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="23dd0e69-859b-4242-b74f-c2eac7362a25" containerName="init" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.750098 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="23dd0e69-859b-4242-b74f-c2eac7362a25" containerName="init" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.755938 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.786028 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.786127 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.786162 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.786198 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzc6j\" (UniqueName: \"kubernetes.io/projected/6257f62d-458f-44ac-abcb-d04f4d6119fa-kube-api-access-kzc6j\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.786234 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-config\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.787654 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-stv2s"] Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.888284 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.888361 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.888405 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzc6j\" (UniqueName: \"kubernetes.io/projected/6257f62d-458f-44ac-abcb-d04f4d6119fa-kube-api-access-kzc6j\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.888443 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-config\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.888532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.889523 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.889640 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.889747 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.890245 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-config\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:51 crc kubenswrapper[4852]: I0129 10:59:51.916768 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzc6j\" (UniqueName: \"kubernetes.io/projected/6257f62d-458f-44ac-abcb-d04f4d6119fa-kube-api-access-kzc6j\") pod \"dnsmasq-dns-b8fbc5445-stv2s\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.081569 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.296823 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.376125 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.537811 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-stv2s"] Jan 29 10:59:52 crc kubenswrapper[4852]: W0129 10:59:52.551150 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6257f62d_458f_44ac_abcb_d04f4d6119fa.slice/crio-4f45ae8c39e03cfac84ba62dc7709338ed4d303b867445c8189436879abebc10 WatchSource:0}: Error finding container 4f45ae8c39e03cfac84ba62dc7709338ed4d303b867445c8189436879abebc10: Status 404 returned error can't find the container with id 4f45ae8c39e03cfac84ba62dc7709338ed4d303b867445c8189436879abebc10 Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.948250 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.956822 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.958473 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.958948 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.960153 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-nd9wq" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.961008 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 29 10:59:52 crc kubenswrapper[4852]: I0129 10:59:52.974105 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.010020 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlh4f\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-kube-api-access-qlh4f\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.010056 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.010330 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.010563 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8111fd43-32e9-4654-bf8e-444fbce4933a-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.010749 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-cache\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.010820 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-lock\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.112079 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlh4f\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-kube-api-access-qlh4f\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.112140 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.112192 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: E0129 10:59:53.112381 4852 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 10:59:53 crc kubenswrapper[4852]: E0129 10:59:53.112418 4852 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.112637 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.119677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8111fd43-32e9-4654-bf8e-444fbce4933a-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: E0129 10:59:53.119759 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift podName:8111fd43-32e9-4654-bf8e-444fbce4933a nodeName:}" failed. No retries permitted until 2026-01-29 10:59:53.61971316 +0000 UTC m=+1090.837044284 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift") pod "swift-storage-0" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a") : configmap "swift-ring-files" not found Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.119981 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-cache\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.120072 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-lock\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.120448 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-cache\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.120573 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-lock\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.142467 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8111fd43-32e9-4654-bf8e-444fbce4933a-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.142487 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlh4f\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-kube-api-access-qlh4f\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.156148 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.250522 4852 generic.go:334] "Generic (PLEG): container finished" podID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerID="bf6ed541cda46af4ef834cbab569c19b1408ee781c6ca3eaa57ea916f87cbe92" exitCode=0 Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.251037 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-z5v7j" podUID="d96606bd-5459-4af8-8894-59a71f43e365" containerName="dnsmasq-dns" containerID="cri-o://4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0" gracePeriod=10 Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.251770 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" 
event={"ID":"6257f62d-458f-44ac-abcb-d04f4d6119fa","Type":"ContainerDied","Data":"bf6ed541cda46af4ef834cbab569c19b1408ee781c6ca3eaa57ea916f87cbe92"} Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.251860 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" event={"ID":"6257f62d-458f-44ac-abcb-d04f4d6119fa","Type":"ContainerStarted","Data":"4f45ae8c39e03cfac84ba62dc7709338ed4d303b867445c8189436879abebc10"} Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.428553 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-kbmbt"] Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.431078 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.433004 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.433492 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.434018 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.440279 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-kbmbt"] Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.526559 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-swiftconf\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.527283 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-dispersionconf\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.527425 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7lvq\" (UniqueName: \"kubernetes.io/projected/6f5dfb94-d484-48d3-ab84-70c647e30d2e-kube-api-access-z7lvq\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.527558 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6f5dfb94-d484-48d3-ab84-70c647e30d2e-etc-swift\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.527684 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-combined-ca-bundle\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.527764 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-ring-data-devices\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.527857 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631520 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6f5dfb94-d484-48d3-ab84-70c647e30d2e-etc-swift\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631653 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631689 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-combined-ca-bundle\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631713 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-ring-data-devices\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631759 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631798 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-swiftconf\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631822 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-dispersionconf\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.631890 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7lvq\" (UniqueName: 
\"kubernetes.io/projected/6f5dfb94-d484-48d3-ab84-70c647e30d2e-kube-api-access-z7lvq\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: E0129 10:59:53.632922 4852 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 10:59:53 crc kubenswrapper[4852]: E0129 10:59:53.632942 4852 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 10:59:53 crc kubenswrapper[4852]: E0129 10:59:53.632994 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift podName:8111fd43-32e9-4654-bf8e-444fbce4933a nodeName:}" failed. No retries permitted until 2026-01-29 10:59:54.632972456 +0000 UTC m=+1091.850303590 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift") pod "swift-storage-0" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a") : configmap "swift-ring-files" not found Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.633183 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6f5dfb94-d484-48d3-ab84-70c647e30d2e-etc-swift\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.633301 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.633492 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-ring-data-devices\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.635861 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-dispersionconf\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.636020 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-combined-ca-bundle\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.639399 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-swiftconf\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.647695 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7lvq\" (UniqueName: \"kubernetes.io/projected/6f5dfb94-d484-48d3-ab84-70c647e30d2e-kube-api-access-z7lvq\") pod \"swift-ring-rebalance-kbmbt\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.777078 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.825741 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.843537 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-dns-svc\") pod \"d96606bd-5459-4af8-8894-59a71f43e365\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.843676 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-config\") pod \"d96606bd-5459-4af8-8894-59a71f43e365\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.843732 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-nb\") pod \"d96606bd-5459-4af8-8894-59a71f43e365\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.843764 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwvlb\" (UniqueName: \"kubernetes.io/projected/d96606bd-5459-4af8-8894-59a71f43e365-kube-api-access-xwvlb\") pod \"d96606bd-5459-4af8-8894-59a71f43e365\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.843858 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-sb\") pod \"d96606bd-5459-4af8-8894-59a71f43e365\" (UID: \"d96606bd-5459-4af8-8894-59a71f43e365\") " Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.850148 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96606bd-5459-4af8-8894-59a71f43e365-kube-api-access-xwvlb" (OuterVolumeSpecName: "kube-api-access-xwvlb") pod "d96606bd-5459-4af8-8894-59a71f43e365" (UID: "d96606bd-5459-4af8-8894-59a71f43e365"). InnerVolumeSpecName "kube-api-access-xwvlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.913053 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d96606bd-5459-4af8-8894-59a71f43e365" (UID: "d96606bd-5459-4af8-8894-59a71f43e365"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.922115 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d96606bd-5459-4af8-8894-59a71f43e365" (UID: "d96606bd-5459-4af8-8894-59a71f43e365"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.938643 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-config" (OuterVolumeSpecName: "config") pod "d96606bd-5459-4af8-8894-59a71f43e365" (UID: "d96606bd-5459-4af8-8894-59a71f43e365"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.951546 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.951738 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-config\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.951754 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.951822 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwvlb\" (UniqueName: \"kubernetes.io/projected/d96606bd-5459-4af8-8894-59a71f43e365-kube-api-access-xwvlb\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:53 crc kubenswrapper[4852]: I0129 10:59:53.955023 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d96606bd-5459-4af8-8894-59a71f43e365" (UID: "d96606bd-5459-4af8-8894-59a71f43e365"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.054252 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d96606bd-5459-4af8-8894-59a71f43e365-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.058263 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-kbmbt"] Jan 29 10:59:54 crc kubenswrapper[4852]: W0129 10:59:54.066118 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f5dfb94_d484_48d3_ab84_70c647e30d2e.slice/crio-958deb6eed20e0c0b79ed653618d8d8587eb9a153095f3641b83fee86a1ba0b9 WatchSource:0}: Error finding container 958deb6eed20e0c0b79ed653618d8d8587eb9a153095f3641b83fee86a1ba0b9: Status 404 returned error can't find the container with id 958deb6eed20e0c0b79ed653618d8d8587eb9a153095f3641b83fee86a1ba0b9 Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.260201 4852 generic.go:334] "Generic (PLEG): container finished" podID="d96606bd-5459-4af8-8894-59a71f43e365" containerID="4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0" exitCode=0 Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.260290 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-z5v7j" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.260306 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-z5v7j" event={"ID":"d96606bd-5459-4af8-8894-59a71f43e365","Type":"ContainerDied","Data":"4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0"} Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.260341 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-z5v7j" event={"ID":"d96606bd-5459-4af8-8894-59a71f43e365","Type":"ContainerDied","Data":"e1782d359ca9db097149c1746d788fd6331ef750a9363322517156615c758d93"} Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.260364 4852 scope.go:117] "RemoveContainer" containerID="4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.262374 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kbmbt" event={"ID":"6f5dfb94-d484-48d3-ab84-70c647e30d2e","Type":"ContainerStarted","Data":"958deb6eed20e0c0b79ed653618d8d8587eb9a153095f3641b83fee86a1ba0b9"} Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.266181 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" event={"ID":"6257f62d-458f-44ac-abcb-d04f4d6119fa","Type":"ContainerStarted","Data":"22be95aa4a2fc2e72e0861a1e906afc3e8e80e69d7c30301af918bf96fc492e0"} Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.266344 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.295476 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" podStartSLOduration=3.295457667 podStartE2EDuration="3.295457667s" podCreationTimestamp="2026-01-29 10:59:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 10:59:54.290018314 +0000 UTC 
m=+1091.507349458" watchObservedRunningTime="2026-01-29 10:59:54.295457667 +0000 UTC m=+1091.512788811" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.297934 4852 scope.go:117] "RemoveContainer" containerID="d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.330819 4852 scope.go:117] "RemoveContainer" containerID="4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0" Jan 29 10:59:54 crc kubenswrapper[4852]: E0129 10:59:54.332490 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0\": container with ID starting with 4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0 not found: ID does not exist" containerID="4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.332533 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0"} err="failed to get container status \"4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0\": rpc error: code = NotFound desc = could not find container \"4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0\": container with ID starting with 4c26b2a3b7261162f66c235ab4c489e19ffc9a2e155a03015e710ddc881ab2c0 not found: ID does not exist" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.332557 4852 scope.go:117] "RemoveContainer" containerID="d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023" Jan 29 10:59:54 crc kubenswrapper[4852]: E0129 10:59:54.332930 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023\": container with ID starting with d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023 not found: ID does not exist" containerID="d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.332952 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023"} err="failed to get container status \"d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023\": rpc error: code = NotFound desc = could not find container \"d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023\": container with ID starting with d2d4974f74d7d974dc2f3fd2e5264fbeaf95141cb7392040bd1ca6e0047c0023 not found: ID does not exist" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.336699 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-z5v7j"] Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.343971 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-z5v7j"] Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.376900 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.454216 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 29 10:59:54 crc kubenswrapper[4852]: I0129 10:59:54.663743 4852 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:54 crc kubenswrapper[4852]: E0129 10:59:54.663914 4852 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 10:59:54 crc kubenswrapper[4852]: E0129 10:59:54.663934 4852 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 10:59:54 crc kubenswrapper[4852]: E0129 10:59:54.663988 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift podName:8111fd43-32e9-4654-bf8e-444fbce4933a nodeName:}" failed. No retries permitted until 2026-01-29 10:59:56.663970797 +0000 UTC m=+1093.881301931 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift") pod "swift-storage-0" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a") : configmap "swift-ring-files" not found Jan 29 10:59:55 crc kubenswrapper[4852]: I0129 10:59:55.490365 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d96606bd-5459-4af8-8894-59a71f43e365" path="/var/lib/kubelet/pods/d96606bd-5459-4af8-8894-59a71f43e365/volumes" Jan 29 10:59:56 crc kubenswrapper[4852]: I0129 10:59:56.698948 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 10:59:56 crc kubenswrapper[4852]: E0129 10:59:56.699176 4852 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 10:59:56 crc kubenswrapper[4852]: E0129 10:59:56.699402 4852 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 10:59:56 crc kubenswrapper[4852]: E0129 10:59:56.699459 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift podName:8111fd43-32e9-4654-bf8e-444fbce4933a nodeName:}" failed. No retries permitted until 2026-01-29 11:00:00.699442002 +0000 UTC m=+1097.916773136 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift") pod "swift-storage-0" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a") : configmap "swift-ring-files" not found Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.006483 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-fhh55"] Jan 29 10:59:57 crc kubenswrapper[4852]: E0129 10:59:57.006893 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96606bd-5459-4af8-8894-59a71f43e365" containerName="init" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.006914 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96606bd-5459-4af8-8894-59a71f43e365" containerName="init" Jan 29 10:59:57 crc kubenswrapper[4852]: E0129 10:59:57.006926 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96606bd-5459-4af8-8894-59a71f43e365" containerName="dnsmasq-dns" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.006933 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96606bd-5459-4af8-8894-59a71f43e365" containerName="dnsmasq-dns" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.007099 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d96606bd-5459-4af8-8894-59a71f43e365" containerName="dnsmasq-dns" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.007630 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.010991 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.012764 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fhh55"] Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.107275 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kjs4\" (UniqueName: \"kubernetes.io/projected/29a73aa0-7309-4b61-8bdf-7b5597c4f257-kube-api-access-5kjs4\") pod \"root-account-create-update-fhh55\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.107334 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29a73aa0-7309-4b61-8bdf-7b5597c4f257-operator-scripts\") pod \"root-account-create-update-fhh55\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.209149 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kjs4\" (UniqueName: \"kubernetes.io/projected/29a73aa0-7309-4b61-8bdf-7b5597c4f257-kube-api-access-5kjs4\") pod \"root-account-create-update-fhh55\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.209195 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29a73aa0-7309-4b61-8bdf-7b5597c4f257-operator-scripts\") pod \"root-account-create-update-fhh55\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " 
pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.209938 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29a73aa0-7309-4b61-8bdf-7b5597c4f257-operator-scripts\") pod \"root-account-create-update-fhh55\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.248084 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kjs4\" (UniqueName: \"kubernetes.io/projected/29a73aa0-7309-4b61-8bdf-7b5597c4f257-kube-api-access-5kjs4\") pod \"root-account-create-update-fhh55\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:57 crc kubenswrapper[4852]: I0129 10:59:57.330207 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fhh55" Jan 29 10:59:58 crc kubenswrapper[4852]: I0129 10:59:58.557765 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fhh55"] Jan 29 10:59:58 crc kubenswrapper[4852]: W0129 10:59:58.566832 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29a73aa0_7309_4b61_8bdf_7b5597c4f257.slice/crio-1dbca1e60684d850306ce3be58fd8a96d9ae423bd48f88ccb3e100a89b72bb92 WatchSource:0}: Error finding container 1dbca1e60684d850306ce3be58fd8a96d9ae423bd48f88ccb3e100a89b72bb92: Status 404 returned error can't find the container with id 1dbca1e60684d850306ce3be58fd8a96d9ae423bd48f88ccb3e100a89b72bb92 Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.328134 4852 generic.go:334] "Generic (PLEG): container finished" podID="29a73aa0-7309-4b61-8bdf-7b5597c4f257" containerID="72626e75fb57ea81ee5bbbd47796aad884f9aa87f9e40895da456464b3723a68" exitCode=0 Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.328272 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fhh55" event={"ID":"29a73aa0-7309-4b61-8bdf-7b5597c4f257","Type":"ContainerDied","Data":"72626e75fb57ea81ee5bbbd47796aad884f9aa87f9e40895da456464b3723a68"} Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.328544 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fhh55" event={"ID":"29a73aa0-7309-4b61-8bdf-7b5597c4f257","Type":"ContainerStarted","Data":"1dbca1e60684d850306ce3be58fd8a96d9ae423bd48f88ccb3e100a89b72bb92"} Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.331837 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kbmbt" event={"ID":"6f5dfb94-d484-48d3-ab84-70c647e30d2e","Type":"ContainerStarted","Data":"a05938cf7aa43400ac64d077c4f27bd8aac2609da89a2cc7553fff6b92d2605d"} Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.377385 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-kbmbt" podStartSLOduration=2.324220074 podStartE2EDuration="6.377365839s" podCreationTimestamp="2026-01-29 10:59:53 +0000 UTC" firstStartedPulling="2026-01-29 10:59:54.06876701 +0000 UTC m=+1091.286098134" lastFinishedPulling="2026-01-29 10:59:58.121912765 +0000 UTC m=+1095.339243899" observedRunningTime="2026-01-29 10:59:59.362165158 +0000 UTC m=+1096.579496292" watchObservedRunningTime="2026-01-29 10:59:59.377365839 +0000 UTC 
m=+1096.594696983" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.497115 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-7nwwb"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.498861 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.511693 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-7nwwb"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.602615 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f67f-account-create-update-lmxm7"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.604333 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.608793 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.612077 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmxm7"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.650439 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7n4b\" (UniqueName: \"kubernetes.io/projected/7018648b-8a68-423d-9532-8222c0c4b6cc-kube-api-access-b7n4b\") pod \"keystone-db-create-7nwwb\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.650487 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7018648b-8a68-423d-9532-8222c0c4b6cc-operator-scripts\") pod \"keystone-db-create-7nwwb\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.752486 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7n4b\" (UniqueName: \"kubernetes.io/projected/7018648b-8a68-423d-9532-8222c0c4b6cc-kube-api-access-b7n4b\") pod \"keystone-db-create-7nwwb\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.752552 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd9t9\" (UniqueName: \"kubernetes.io/projected/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-kube-api-access-xd9t9\") pod \"keystone-f67f-account-create-update-lmxm7\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.752608 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7018648b-8a68-423d-9532-8222c0c4b6cc-operator-scripts\") pod \"keystone-db-create-7nwwb\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.752722 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-operator-scripts\") 
pod \"keystone-f67f-account-create-update-lmxm7\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.753779 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7018648b-8a68-423d-9532-8222c0c4b6cc-operator-scripts\") pod \"keystone-db-create-7nwwb\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.775793 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7n4b\" (UniqueName: \"kubernetes.io/projected/7018648b-8a68-423d-9532-8222c0c4b6cc-kube-api-access-b7n4b\") pod \"keystone-db-create-7nwwb\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.828345 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7nwwb" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.854955 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-operator-scripts\") pod \"keystone-f67f-account-create-update-lmxm7\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.855393 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xd9t9\" (UniqueName: \"kubernetes.io/projected/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-kube-api-access-xd9t9\") pod \"keystone-f67f-account-create-update-lmxm7\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.855909 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-operator-scripts\") pod \"keystone-f67f-account-create-update-lmxm7\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.868381 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-j4njn"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.869840 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j4njn" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.879944 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-j4njn"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.904474 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xd9t9\" (UniqueName: \"kubernetes.io/projected/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-kube-api-access-xd9t9\") pod \"keystone-f67f-account-create-update-lmxm7\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.926848 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.973575 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-a154-account-create-update-l267s"] Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.974837 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-a154-account-create-update-l267s" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.980838 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 29 10:59:59 crc kubenswrapper[4852]: I0129 10:59:59.985211 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-a154-account-create-update-l267s"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.017055 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.017101 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.059211 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67bd911-5506-4636-8b80-9f5a73e0c99f-operator-scripts\") pod \"placement-db-create-j4njn\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.060485 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zds4j\" (UniqueName: \"kubernetes.io/projected/e67bd911-5506-4636-8b80-9f5a73e0c99f-kube-api-access-zds4j\") pod \"placement-db-create-j4njn\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.089025 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-sdvb7"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.092744 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.111189 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-sdvb7"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.156671 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.160842 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.161820 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67bd911-5506-4636-8b80-9f5a73e0c99f-operator-scripts\") pod \"placement-db-create-j4njn\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.161914 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmd7m\" (UniqueName: \"kubernetes.io/projected/e8062643-e2e7-45e4-aab3-e39d07b2946c-kube-api-access-bmd7m\") pod \"placement-a154-account-create-update-l267s\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.162712 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67bd911-5506-4636-8b80-9f5a73e0c99f-operator-scripts\") pod \"placement-db-create-j4njn\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.163006 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8062643-e2e7-45e4-aab3-e39d07b2946c-operator-scripts\") pod \"placement-a154-account-create-update-l267s\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.163110 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zds4j\" (UniqueName: \"kubernetes.io/projected/e67bd911-5506-4636-8b80-9f5a73e0c99f-kube-api-access-zds4j\") pod \"placement-db-create-j4njn\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.163138 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.163623 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.167084 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.192275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zds4j\" (UniqueName: \"kubernetes.io/projected/e67bd911-5506-4636-8b80-9f5a73e0c99f-kube-api-access-zds4j\") pod \"placement-db-create-j4njn\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.194742 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-cfee-account-create-update-5hl62"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.195986 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.202153 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.222736 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-cfee-account-create-update-5hl62"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.264781 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-secret-volume\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.265141 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjqrl\" (UniqueName: \"kubernetes.io/projected/0333efd0-887f-4c8f-a25f-b6e7f56068c0-kube-api-access-jjqrl\") pod \"glance-db-create-sdvb7\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.265253 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmd7m\" (UniqueName: \"kubernetes.io/projected/e8062643-e2e7-45e4-aab3-e39d07b2946c-kube-api-access-bmd7m\") pod \"placement-a154-account-create-update-l267s\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.265370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0333efd0-887f-4c8f-a25f-b6e7f56068c0-operator-scripts\") pod \"glance-db-create-sdvb7\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.265541 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-config-volume\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.265644 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2cbk\" (UniqueName: \"kubernetes.io/projected/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-kube-api-access-s2cbk\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.265827 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8062643-e2e7-45e4-aab3-e39d07b2946c-operator-scripts\") pod \"placement-a154-account-create-update-l267s\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.266719 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8062643-e2e7-45e4-aab3-e39d07b2946c-operator-scripts\") pod \"placement-a154-account-create-update-l267s\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.281371 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmd7m\" (UniqueName: \"kubernetes.io/projected/e8062643-e2e7-45e4-aab3-e39d07b2946c-kube-api-access-bmd7m\") pod \"placement-a154-account-create-update-l267s\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.331474 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-7nwwb"] Jan 29 11:00:00 crc kubenswrapper[4852]: W0129 11:00:00.334933 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7018648b_8a68_423d_9532_8222c0c4b6cc.slice/crio-58a22a238ebd85ba6ca806ca18c6d654f6c84502a5925b08dd1f27de3552d7ea WatchSource:0}: Error finding container 58a22a238ebd85ba6ca806ca18c6d654f6c84502a5925b08dd1f27de3552d7ea: Status 404 returned error can't find the container with id 58a22a238ebd85ba6ca806ca18c6d654f6c84502a5925b08dd1f27de3552d7ea Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.349647 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j4njn" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367010 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf4pn\" (UniqueName: \"kubernetes.io/projected/86ae0309-cead-43f3-bf5b-617d972977c5-kube-api-access-kf4pn\") pod \"glance-cfee-account-create-update-5hl62\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367067 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0333efd0-887f-4c8f-a25f-b6e7f56068c0-operator-scripts\") pod \"glance-db-create-sdvb7\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367112 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-config-volume\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367131 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2cbk\" (UniqueName: \"kubernetes.io/projected/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-kube-api-access-s2cbk\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367221 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-secret-volume\") pod \"collect-profiles-29494740-mwtrz\" 
(UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367239 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjqrl\" (UniqueName: \"kubernetes.io/projected/0333efd0-887f-4c8f-a25f-b6e7f56068c0-kube-api-access-jjqrl\") pod \"glance-db-create-sdvb7\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.367261 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86ae0309-cead-43f3-bf5b-617d972977c5-operator-scripts\") pod \"glance-cfee-account-create-update-5hl62\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.368378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0333efd0-887f-4c8f-a25f-b6e7f56068c0-operator-scripts\") pod \"glance-db-create-sdvb7\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.369177 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-config-volume\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.372879 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-secret-volume\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.373313 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.385433 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2cbk\" (UniqueName: \"kubernetes.io/projected/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-kube-api-access-s2cbk\") pod \"collect-profiles-29494740-mwtrz\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.385904 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjqrl\" (UniqueName: \"kubernetes.io/projected/0333efd0-887f-4c8f-a25f-b6e7f56068c0-kube-api-access-jjqrl\") pod \"glance-db-create-sdvb7\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.422494 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.468392 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86ae0309-cead-43f3-bf5b-617d972977c5-operator-scripts\") pod \"glance-cfee-account-create-update-5hl62\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.468536 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf4pn\" (UniqueName: \"kubernetes.io/projected/86ae0309-cead-43f3-bf5b-617d972977c5-kube-api-access-kf4pn\") pod \"glance-cfee-account-create-update-5hl62\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.471151 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86ae0309-cead-43f3-bf5b-617d972977c5-operator-scripts\") pod \"glance-cfee-account-create-update-5hl62\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.490737 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.491568 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf4pn\" (UniqueName: \"kubernetes.io/projected/86ae0309-cead-43f3-bf5b-617d972977c5-kube-api-access-kf4pn\") pod \"glance-cfee-account-create-update-5hl62\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.528505 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.534879 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmxm7"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.684154 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fhh55" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.779099 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 11:00:00 crc kubenswrapper[4852]: E0129 11:00:00.779281 4852 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 11:00:00 crc kubenswrapper[4852]: E0129 11:00:00.779295 4852 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 11:00:00 crc kubenswrapper[4852]: E0129 11:00:00.779339 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift podName:8111fd43-32e9-4654-bf8e-444fbce4933a nodeName:}" failed. 
No retries permitted until 2026-01-29 11:00:08.779323481 +0000 UTC m=+1105.996654615 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift") pod "swift-storage-0" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a") : configmap "swift-ring-files" not found Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.882442 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29a73aa0-7309-4b61-8bdf-7b5597c4f257-operator-scripts\") pod \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.882498 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kjs4\" (UniqueName: \"kubernetes.io/projected/29a73aa0-7309-4b61-8bdf-7b5597c4f257-kube-api-access-5kjs4\") pod \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\" (UID: \"29a73aa0-7309-4b61-8bdf-7b5597c4f257\") " Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.889053 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29a73aa0-7309-4b61-8bdf-7b5597c4f257-kube-api-access-5kjs4" (OuterVolumeSpecName: "kube-api-access-5kjs4") pod "29a73aa0-7309-4b61-8bdf-7b5597c4f257" (UID: "29a73aa0-7309-4b61-8bdf-7b5597c4f257"). InnerVolumeSpecName "kube-api-access-5kjs4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.890012 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29a73aa0-7309-4b61-8bdf-7b5597c4f257-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29a73aa0-7309-4b61-8bdf-7b5597c4f257" (UID: "29a73aa0-7309-4b61-8bdf-7b5597c4f257"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.958281 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-j4njn"] Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.996394 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29a73aa0-7309-4b61-8bdf-7b5597c4f257-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:00 crc kubenswrapper[4852]: I0129 11:00:00.996437 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kjs4\" (UniqueName: \"kubernetes.io/projected/29a73aa0-7309-4b61-8bdf-7b5597c4f257-kube-api-access-5kjs4\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.066425 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-a154-account-create-update-l267s"] Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.250887 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-sdvb7"] Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.281463 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-cfee-account-create-update-5hl62"] Jan 29 11:00:01 crc kubenswrapper[4852]: W0129 11:00:01.287598 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86ae0309_cead_43f3_bf5b_617d972977c5.slice/crio-7bfb8649804436afd86f134397b840561b56c96ef3f11f6a4fc29407e4f017bd WatchSource:0}: Error finding container 7bfb8649804436afd86f134397b840561b56c96ef3f11f6a4fc29407e4f017bd: Status 404 returned error can't find the container with id 7bfb8649804436afd86f134397b840561b56c96ef3f11f6a4fc29407e4f017bd Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.349618 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cfee-account-create-update-5hl62" event={"ID":"86ae0309-cead-43f3-bf5b-617d972977c5","Type":"ContainerStarted","Data":"7bfb8649804436afd86f134397b840561b56c96ef3f11f6a4fc29407e4f017bd"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.350972 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j4njn" event={"ID":"e67bd911-5506-4636-8b80-9f5a73e0c99f","Type":"ContainerStarted","Data":"40ed47ea04c4216bd0f780793b296ae1383073439d898f7cae9b961a5754c9cf"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.352088 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f67f-account-create-update-lmxm7" event={"ID":"4d3b9c66-5911-42ee-ad3d-c746b8aa5364","Type":"ContainerStarted","Data":"c379874404a8707c272b309abb9a69d2de4332d2beb0d5dd9373e06f33d15fd9"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.356303 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fhh55" event={"ID":"29a73aa0-7309-4b61-8bdf-7b5597c4f257","Type":"ContainerDied","Data":"1dbca1e60684d850306ce3be58fd8a96d9ae423bd48f88ccb3e100a89b72bb92"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.356327 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fhh55" Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.356335 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dbca1e60684d850306ce3be58fd8a96d9ae423bd48f88ccb3e100a89b72bb92" Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.359132 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sdvb7" event={"ID":"0333efd0-887f-4c8f-a25f-b6e7f56068c0","Type":"ContainerStarted","Data":"e6b7174a9fb2c630f463b69a5da23cf1f65354b6851c326238c3da90f542ddd9"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.363394 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-a154-account-create-update-l267s" event={"ID":"e8062643-e2e7-45e4-aab3-e39d07b2946c","Type":"ContainerStarted","Data":"aea8ccd8a21d955d1ef0e9f8625b63e982a02cc164b33fd083b26a6edebb7c15"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.364691 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7nwwb" event={"ID":"7018648b-8a68-423d-9532-8222c0c4b6cc","Type":"ContainerStarted","Data":"58a22a238ebd85ba6ca806ca18c6d654f6c84502a5925b08dd1f27de3552d7ea"} Jan 29 11:00:01 crc kubenswrapper[4852]: I0129 11:00:01.378036 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz"] Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.083042 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.194953 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-gpjt4"] Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.195204 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerName="dnsmasq-dns" containerID="cri-o://4e9d844945a3af9d01db6e9f68497c0f269565d408a2695a0a6a424055235533" gracePeriod=10 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.373475 4852 generic.go:334] "Generic (PLEG): container finished" podID="4d3b9c66-5911-42ee-ad3d-c746b8aa5364" containerID="b4d4086f1f38fc735421b6e2ec75b96f1944a10c6bc1cbca217bbc72da3c061e" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.374050 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f67f-account-create-update-lmxm7" event={"ID":"4d3b9c66-5911-42ee-ad3d-c746b8aa5364","Type":"ContainerDied","Data":"b4d4086f1f38fc735421b6e2ec75b96f1944a10c6bc1cbca217bbc72da3c061e"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.384544 4852 generic.go:334] "Generic (PLEG): container finished" podID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerID="4e9d844945a3af9d01db6e9f68497c0f269565d408a2695a0a6a424055235533" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.384620 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" event={"ID":"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0","Type":"ContainerDied","Data":"4e9d844945a3af9d01db6e9f68497c0f269565d408a2695a0a6a424055235533"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.386930 4852 generic.go:334] "Generic (PLEG): container finished" podID="4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" containerID="cacc58bc0392c9c8309ea56f2b407ffb1227c73dfbc8cdc947cccef4d096e1a3" exitCode=0 Jan 29 
11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.386969 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" event={"ID":"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47","Type":"ContainerDied","Data":"cacc58bc0392c9c8309ea56f2b407ffb1227c73dfbc8cdc947cccef4d096e1a3"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.386984 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" event={"ID":"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47","Type":"ContainerStarted","Data":"080ef8544a85c0e224982696a01bbefdbc8f7dc2eae2581b47735a2776cebc64"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.401101 4852 generic.go:334] "Generic (PLEG): container finished" podID="0333efd0-887f-4c8f-a25f-b6e7f56068c0" containerID="f196aaf25763e1b86b3c1a188cfcfddffdbe783ec9c27d57363fa169a5176bff" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.401397 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sdvb7" event={"ID":"0333efd0-887f-4c8f-a25f-b6e7f56068c0","Type":"ContainerDied","Data":"f196aaf25763e1b86b3c1a188cfcfddffdbe783ec9c27d57363fa169a5176bff"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.407138 4852 generic.go:334] "Generic (PLEG): container finished" podID="e8062643-e2e7-45e4-aab3-e39d07b2946c" containerID="f82ea28adea60d3d3b85593db30daee168785cf0621ab4ad5ab37f1c6759fb5b" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.407201 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-a154-account-create-update-l267s" event={"ID":"e8062643-e2e7-45e4-aab3-e39d07b2946c","Type":"ContainerDied","Data":"f82ea28adea60d3d3b85593db30daee168785cf0621ab4ad5ab37f1c6759fb5b"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.408559 4852 generic.go:334] "Generic (PLEG): container finished" podID="7018648b-8a68-423d-9532-8222c0c4b6cc" containerID="5d0052d8fbb2c502a21137e2acb2a4c06f83feef0268e7f3910bd71706399868" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.408622 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7nwwb" event={"ID":"7018648b-8a68-423d-9532-8222c0c4b6cc","Type":"ContainerDied","Data":"5d0052d8fbb2c502a21137e2acb2a4c06f83feef0268e7f3910bd71706399868"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.409940 4852 generic.go:334] "Generic (PLEG): container finished" podID="86ae0309-cead-43f3-bf5b-617d972977c5" containerID="b1c723036342d80c512f029c8c3089fe19ed55b50afbd96280c5754ddc399172" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.409985 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cfee-account-create-update-5hl62" event={"ID":"86ae0309-cead-43f3-bf5b-617d972977c5","Type":"ContainerDied","Data":"b1c723036342d80c512f029c8c3089fe19ed55b50afbd96280c5754ddc399172"} Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.413066 4852 generic.go:334] "Generic (PLEG): container finished" podID="e67bd911-5506-4636-8b80-9f5a73e0c99f" containerID="512a948f46c227dae55a3847babe03fbe428370e46c983712358f2759595f57f" exitCode=0 Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.413100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j4njn" event={"ID":"e67bd911-5506-4636-8b80-9f5a73e0c99f","Type":"ContainerDied","Data":"512a948f46c227dae55a3847babe03fbe428370e46c983712358f2759595f57f"} Jan 29 11:00:02 crc 
kubenswrapper[4852]: I0129 11:00:02.639924 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.825010 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-dns-svc\") pod \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.825135 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-config\") pod \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.825298 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gzfx\" (UniqueName: \"kubernetes.io/projected/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-kube-api-access-5gzfx\") pod \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\" (UID: \"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0\") " Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.830086 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-kube-api-access-5gzfx" (OuterVolumeSpecName: "kube-api-access-5gzfx") pod "d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" (UID: "d8bf3dbd-226b-4092-b1b0-29dc8696c0f0"). InnerVolumeSpecName "kube-api-access-5gzfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.865035 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-config" (OuterVolumeSpecName: "config") pod "d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" (UID: "d8bf3dbd-226b-4092-b1b0-29dc8696c0f0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.878042 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" (UID: "d8bf3dbd-226b-4092-b1b0-29dc8696c0f0"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.927727 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.927767 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:02 crc kubenswrapper[4852]: I0129 11:00:02.927787 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gzfx\" (UniqueName: \"kubernetes.io/projected/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0-kube-api-access-5gzfx\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.243380 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-fhh55"] Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.254809 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-fhh55"] Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.422436 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" event={"ID":"d8bf3dbd-226b-4092-b1b0-29dc8696c0f0","Type":"ContainerDied","Data":"bb283acbebded98ba563c94291b61059a577d576befd8eb0e519379f866ea8e9"} Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.422749 4852 scope.go:117] "RemoveContainer" containerID="4e9d844945a3af9d01db6e9f68497c0f269565d408a2695a0a6a424055235533" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.422545 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-gpjt4" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.480977 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29a73aa0-7309-4b61-8bdf-7b5597c4f257" path="/var/lib/kubelet/pods/29a73aa0-7309-4b61-8bdf-7b5597c4f257/volumes" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.481312 4852 scope.go:117] "RemoveContainer" containerID="49c522f6394f895c70208779ce476975410c0b3c80178f157f32ccec7fb91457" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.481599 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-gpjt4"] Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.481638 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-gpjt4"] Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.736260 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.823567 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-j4njn" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.844024 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-operator-scripts\") pod \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.844095 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xd9t9\" (UniqueName: \"kubernetes.io/projected/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-kube-api-access-xd9t9\") pod \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\" (UID: \"4d3b9c66-5911-42ee-ad3d-c746b8aa5364\") " Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.847776 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4d3b9c66-5911-42ee-ad3d-c746b8aa5364" (UID: "4d3b9c66-5911-42ee-ad3d-c746b8aa5364"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.851901 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-kube-api-access-xd9t9" (OuterVolumeSpecName: "kube-api-access-xd9t9") pod "4d3b9c66-5911-42ee-ad3d-c746b8aa5364" (UID: "4d3b9c66-5911-42ee-ad3d-c746b8aa5364"). InnerVolumeSpecName "kube-api-access-xd9t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.945090 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zds4j\" (UniqueName: \"kubernetes.io/projected/e67bd911-5506-4636-8b80-9f5a73e0c99f-kube-api-access-zds4j\") pod \"e67bd911-5506-4636-8b80-9f5a73e0c99f\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.945501 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67bd911-5506-4636-8b80-9f5a73e0c99f-operator-scripts\") pod \"e67bd911-5506-4636-8b80-9f5a73e0c99f\" (UID: \"e67bd911-5506-4636-8b80-9f5a73e0c99f\") " Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.946244 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.946352 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xd9t9\" (UniqueName: \"kubernetes.io/projected/4d3b9c66-5911-42ee-ad3d-c746b8aa5364-kube-api-access-xd9t9\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.947799 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67bd911-5506-4636-8b80-9f5a73e0c99f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e67bd911-5506-4636-8b80-9f5a73e0c99f" (UID: "e67bd911-5506-4636-8b80-9f5a73e0c99f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:03 crc kubenswrapper[4852]: I0129 11:00:03.954209 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e67bd911-5506-4636-8b80-9f5a73e0c99f-kube-api-access-zds4j" (OuterVolumeSpecName: "kube-api-access-zds4j") pod "e67bd911-5506-4636-8b80-9f5a73e0c99f" (UID: "e67bd911-5506-4636-8b80-9f5a73e0c99f"). InnerVolumeSpecName "kube-api-access-zds4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.050842 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67bd911-5506-4636-8b80-9f5a73e0c99f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.050874 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zds4j\" (UniqueName: \"kubernetes.io/projected/e67bd911-5506-4636-8b80-9f5a73e0c99f-kube-api-access-zds4j\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.145184 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7nwwb" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.150903 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.151635 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-secret-volume\") pod \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.155940 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" (UID: "4d1dac7e-d83f-4d2f-bc9f-fd197a406a47"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.164945 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.186917 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.194861 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.252543 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2cbk\" (UniqueName: \"kubernetes.io/projected/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-kube-api-access-s2cbk\") pod \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.252612 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7n4b\" (UniqueName: \"kubernetes.io/projected/7018648b-8a68-423d-9532-8222c0c4b6cc-kube-api-access-b7n4b\") pod \"7018648b-8a68-423d-9532-8222c0c4b6cc\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.252652 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-config-volume\") pod \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\" (UID: \"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.252728 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7018648b-8a68-423d-9532-8222c0c4b6cc-operator-scripts\") pod \"7018648b-8a68-423d-9532-8222c0c4b6cc\" (UID: \"7018648b-8a68-423d-9532-8222c0c4b6cc\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.253223 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7018648b-8a68-423d-9532-8222c0c4b6cc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7018648b-8a68-423d-9532-8222c0c4b6cc" (UID: "7018648b-8a68-423d-9532-8222c0c4b6cc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.253234 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.253499 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-config-volume" (OuterVolumeSpecName: "config-volume") pod "4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" (UID: "4d1dac7e-d83f-4d2f-bc9f-fd197a406a47"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.256839 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7018648b-8a68-423d-9532-8222c0c4b6cc-kube-api-access-b7n4b" (OuterVolumeSpecName: "kube-api-access-b7n4b") pod "7018648b-8a68-423d-9532-8222c0c4b6cc" (UID: "7018648b-8a68-423d-9532-8222c0c4b6cc"). InnerVolumeSpecName "kube-api-access-b7n4b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.256917 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-kube-api-access-s2cbk" (OuterVolumeSpecName: "kube-api-access-s2cbk") pod "4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" (UID: "4d1dac7e-d83f-4d2f-bc9f-fd197a406a47"). InnerVolumeSpecName "kube-api-access-s2cbk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.353708 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0333efd0-887f-4c8f-a25f-b6e7f56068c0-operator-scripts\") pod \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.353780 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8062643-e2e7-45e4-aab3-e39d07b2946c-operator-scripts\") pod \"e8062643-e2e7-45e4-aab3-e39d07b2946c\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.353932 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kf4pn\" (UniqueName: \"kubernetes.io/projected/86ae0309-cead-43f3-bf5b-617d972977c5-kube-api-access-kf4pn\") pod \"86ae0309-cead-43f3-bf5b-617d972977c5\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354015 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86ae0309-cead-43f3-bf5b-617d972977c5-operator-scripts\") pod \"86ae0309-cead-43f3-bf5b-617d972977c5\" (UID: \"86ae0309-cead-43f3-bf5b-617d972977c5\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354043 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmd7m\" (UniqueName: \"kubernetes.io/projected/e8062643-e2e7-45e4-aab3-e39d07b2946c-kube-api-access-bmd7m\") pod \"e8062643-e2e7-45e4-aab3-e39d07b2946c\" (UID: \"e8062643-e2e7-45e4-aab3-e39d07b2946c\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354083 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjqrl\" (UniqueName: \"kubernetes.io/projected/0333efd0-887f-4c8f-a25f-b6e7f56068c0-kube-api-access-jjqrl\") pod \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\" (UID: \"0333efd0-887f-4c8f-a25f-b6e7f56068c0\") " Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354557 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2cbk\" (UniqueName: \"kubernetes.io/projected/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-kube-api-access-s2cbk\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354612 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7n4b\" (UniqueName: \"kubernetes.io/projected/7018648b-8a68-423d-9532-8222c0c4b6cc-kube-api-access-b7n4b\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354632 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.354649 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7018648b-8a68-423d-9532-8222c0c4b6cc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.355082 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/0333efd0-887f-4c8f-a25f-b6e7f56068c0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0333efd0-887f-4c8f-a25f-b6e7f56068c0" (UID: "0333efd0-887f-4c8f-a25f-b6e7f56068c0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.355255 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86ae0309-cead-43f3-bf5b-617d972977c5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "86ae0309-cead-43f3-bf5b-617d972977c5" (UID: "86ae0309-cead-43f3-bf5b-617d972977c5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.356083 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8062643-e2e7-45e4-aab3-e39d07b2946c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8062643-e2e7-45e4-aab3-e39d07b2946c" (UID: "e8062643-e2e7-45e4-aab3-e39d07b2946c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.358022 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0333efd0-887f-4c8f-a25f-b6e7f56068c0-kube-api-access-jjqrl" (OuterVolumeSpecName: "kube-api-access-jjqrl") pod "0333efd0-887f-4c8f-a25f-b6e7f56068c0" (UID: "0333efd0-887f-4c8f-a25f-b6e7f56068c0"). InnerVolumeSpecName "kube-api-access-jjqrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.358325 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86ae0309-cead-43f3-bf5b-617d972977c5-kube-api-access-kf4pn" (OuterVolumeSpecName: "kube-api-access-kf4pn") pod "86ae0309-cead-43f3-bf5b-617d972977c5" (UID: "86ae0309-cead-43f3-bf5b-617d972977c5"). InnerVolumeSpecName "kube-api-access-kf4pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.358918 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8062643-e2e7-45e4-aab3-e39d07b2946c-kube-api-access-bmd7m" (OuterVolumeSpecName: "kube-api-access-bmd7m") pod "e8062643-e2e7-45e4-aab3-e39d07b2946c" (UID: "e8062643-e2e7-45e4-aab3-e39d07b2946c"). InnerVolumeSpecName "kube-api-access-bmd7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.432401 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.432432 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz" event={"ID":"4d1dac7e-d83f-4d2f-bc9f-fd197a406a47","Type":"ContainerDied","Data":"080ef8544a85c0e224982696a01bbefdbc8f7dc2eae2581b47735a2776cebc64"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.432501 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="080ef8544a85c0e224982696a01bbefdbc8f7dc2eae2581b47735a2776cebc64" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.434448 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-sdvb7" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.434471 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sdvb7" event={"ID":"0333efd0-887f-4c8f-a25f-b6e7f56068c0","Type":"ContainerDied","Data":"e6b7174a9fb2c630f463b69a5da23cf1f65354b6851c326238c3da90f542ddd9"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.434505 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6b7174a9fb2c630f463b69a5da23cf1f65354b6851c326238c3da90f542ddd9" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.436246 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-a154-account-create-update-l267s" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.436251 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-a154-account-create-update-l267s" event={"ID":"e8062643-e2e7-45e4-aab3-e39d07b2946c","Type":"ContainerDied","Data":"aea8ccd8a21d955d1ef0e9f8625b63e982a02cc164b33fd083b26a6edebb7c15"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.436283 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aea8ccd8a21d955d1ef0e9f8625b63e982a02cc164b33fd083b26a6edebb7c15" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.437909 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7nwwb" event={"ID":"7018648b-8a68-423d-9532-8222c0c4b6cc","Type":"ContainerDied","Data":"58a22a238ebd85ba6ca806ca18c6d654f6c84502a5925b08dd1f27de3552d7ea"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.438156 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58a22a238ebd85ba6ca806ca18c6d654f6c84502a5925b08dd1f27de3552d7ea" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.437915 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7nwwb" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.439363 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cfee-account-create-update-5hl62" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.439394 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cfee-account-create-update-5hl62" event={"ID":"86ae0309-cead-43f3-bf5b-617d972977c5","Type":"ContainerDied","Data":"7bfb8649804436afd86f134397b840561b56c96ef3f11f6a4fc29407e4f017bd"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.439415 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7bfb8649804436afd86f134397b840561b56c96ef3f11f6a4fc29407e4f017bd" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.440646 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j4njn" event={"ID":"e67bd911-5506-4636-8b80-9f5a73e0c99f","Type":"ContainerDied","Data":"40ed47ea04c4216bd0f780793b296ae1383073439d898f7cae9b961a5754c9cf"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.440670 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40ed47ea04c4216bd0f780793b296ae1383073439d898f7cae9b961a5754c9cf" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.440712 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-j4njn" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.449900 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f67f-account-create-update-lmxm7" event={"ID":"4d3b9c66-5911-42ee-ad3d-c746b8aa5364","Type":"ContainerDied","Data":"c379874404a8707c272b309abb9a69d2de4332d2beb0d5dd9373e06f33d15fd9"} Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.450000 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c379874404a8707c272b309abb9a69d2de4332d2beb0d5dd9373e06f33d15fd9" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.450082 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmxm7" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.469490 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0333efd0-887f-4c8f-a25f-b6e7f56068c0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.473329 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8062643-e2e7-45e4-aab3-e39d07b2946c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.475704 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kf4pn\" (UniqueName: \"kubernetes.io/projected/86ae0309-cead-43f3-bf5b-617d972977c5-kube-api-access-kf4pn\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.475740 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmd7m\" (UniqueName: \"kubernetes.io/projected/e8062643-e2e7-45e4-aab3-e39d07b2946c-kube-api-access-bmd7m\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.475754 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86ae0309-cead-43f3-bf5b-617d972977c5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:04 crc kubenswrapper[4852]: I0129 11:00:04.475768 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjqrl\" (UniqueName: \"kubernetes.io/projected/0333efd0-887f-4c8f-a25f-b6e7f56068c0-kube-api-access-jjqrl\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:05 crc kubenswrapper[4852]: I0129 11:00:05.478374 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" path="/var/lib/kubelet/pods/d8bf3dbd-226b-4092-b1b0-29dc8696c0f0/volumes" Jan 29 11:00:06 crc kubenswrapper[4852]: I0129 11:00:06.474541 4852 generic.go:334] "Generic (PLEG): container finished" podID="6f5dfb94-d484-48d3-ab84-70c647e30d2e" containerID="a05938cf7aa43400ac64d077c4f27bd8aac2609da89a2cc7553fff6b92d2605d" exitCode=0 Jan 29 11:00:06 crc kubenswrapper[4852]: I0129 11:00:06.474744 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kbmbt" event={"ID":"6f5dfb94-d484-48d3-ab84-70c647e30d2e","Type":"ContainerDied","Data":"a05938cf7aa43400ac64d077c4f27bd8aac2609da89a2cc7553fff6b92d2605d"} Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.832663 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.932988 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.933049 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-combined-ca-bundle\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.933082 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6f5dfb94-d484-48d3-ab84-70c647e30d2e-etc-swift\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.933179 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7lvq\" (UniqueName: \"kubernetes.io/projected/6f5dfb94-d484-48d3-ab84-70c647e30d2e-kube-api-access-z7lvq\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.933207 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-swiftconf\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.933249 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-dispersionconf\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.933335 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-ring-data-devices\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.934360 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.934673 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f5dfb94-d484-48d3-ab84-70c647e30d2e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.939409 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f5dfb94-d484-48d3-ab84-70c647e30d2e-kube-api-access-z7lvq" (OuterVolumeSpecName: "kube-api-access-z7lvq") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "kube-api-access-z7lvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.941725 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:07 crc kubenswrapper[4852]: E0129 11:00:07.967276 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts podName:6f5dfb94-d484-48d3-ab84-70c647e30d2e nodeName:}" failed. No retries permitted until 2026-01-29 11:00:08.467248081 +0000 UTC m=+1105.684579235 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "scripts" (UniqueName: "kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e") : error deleting /var/lib/kubelet/pods/6f5dfb94-d484-48d3-ab84-70c647e30d2e/volume-subpaths: remove /var/lib/kubelet/pods/6f5dfb94-d484-48d3-ab84-70c647e30d2e/volume-subpaths: no such file or directory Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.967279 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:07 crc kubenswrapper[4852]: I0129 11:00:07.969531 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.035601 4852 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.035641 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.035654 4852 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6f5dfb94-d484-48d3-ab84-70c647e30d2e-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.035667 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7lvq\" (UniqueName: \"kubernetes.io/projected/6f5dfb94-d484-48d3-ab84-70c647e30d2e-kube-api-access-z7lvq\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.035680 4852 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.035712 4852 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6f5dfb94-d484-48d3-ab84-70c647e30d2e-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.192367 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.279670 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-h2ljt"] Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280024 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67bd911-5506-4636-8b80-9f5a73e0c99f" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280038 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67bd911-5506-4636-8b80-9f5a73e0c99f" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280049 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86ae0309-cead-43f3-bf5b-617d972977c5" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280055 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="86ae0309-cead-43f3-bf5b-617d972977c5" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280065 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7018648b-8a68-423d-9532-8222c0c4b6cc" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280071 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7018648b-8a68-423d-9532-8222c0c4b6cc" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280083 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerName="init" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280088 4852 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerName="init" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280105 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" containerName="collect-profiles" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280111 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" containerName="collect-profiles" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280122 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8062643-e2e7-45e4-aab3-e39d07b2946c" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280127 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8062643-e2e7-45e4-aab3-e39d07b2946c" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280137 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0333efd0-887f-4c8f-a25f-b6e7f56068c0" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280142 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0333efd0-887f-4c8f-a25f-b6e7f56068c0" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280151 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerName="dnsmasq-dns" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280156 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerName="dnsmasq-dns" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280163 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d3b9c66-5911-42ee-ad3d-c746b8aa5364" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280170 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d3b9c66-5911-42ee-ad3d-c746b8aa5364" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280177 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f5dfb94-d484-48d3-ab84-70c647e30d2e" containerName="swift-ring-rebalance" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280183 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f5dfb94-d484-48d3-ab84-70c647e30d2e" containerName="swift-ring-rebalance" Jan 29 11:00:08 crc kubenswrapper[4852]: E0129 11:00:08.280191 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29a73aa0-7309-4b61-8bdf-7b5597c4f257" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280197 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="29a73aa0-7309-4b61-8bdf-7b5597c4f257" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280334 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e67bd911-5506-4636-8b80-9f5a73e0c99f" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280348 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7018648b-8a68-423d-9532-8222c0c4b6cc" containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280356 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0333efd0-887f-4c8f-a25f-b6e7f56068c0" 
containerName="mariadb-database-create" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280363 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8062643-e2e7-45e4-aab3-e39d07b2946c" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280372 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="29a73aa0-7309-4b61-8bdf-7b5597c4f257" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280382 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f5dfb94-d484-48d3-ab84-70c647e30d2e" containerName="swift-ring-rebalance" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280388 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="86ae0309-cead-43f3-bf5b-617d972977c5" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280398 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" containerName="collect-profiles" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280405 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bf3dbd-226b-4092-b1b0-29dc8696c0f0" containerName="dnsmasq-dns" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280414 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d3b9c66-5911-42ee-ad3d-c746b8aa5364" containerName="mariadb-account-create-update" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.280946 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.283362 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.289721 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-h2ljt"] Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.345219 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2pfc\" (UniqueName: \"kubernetes.io/projected/4f96a744-12c9-4dd2-908d-d1984b7fccfa-kube-api-access-b2pfc\") pod \"root-account-create-update-h2ljt\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.345306 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f96a744-12c9-4dd2-908d-d1984b7fccfa-operator-scripts\") pod \"root-account-create-update-h2ljt\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.447043 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f96a744-12c9-4dd2-908d-d1984b7fccfa-operator-scripts\") pod \"root-account-create-update-h2ljt\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.447217 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2pfc\" (UniqueName: 
\"kubernetes.io/projected/4f96a744-12c9-4dd2-908d-d1984b7fccfa-kube-api-access-b2pfc\") pod \"root-account-create-update-h2ljt\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.447753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f96a744-12c9-4dd2-908d-d1984b7fccfa-operator-scripts\") pod \"root-account-create-update-h2ljt\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.476151 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2pfc\" (UniqueName: \"kubernetes.io/projected/4f96a744-12c9-4dd2-908d-d1984b7fccfa-kube-api-access-b2pfc\") pod \"root-account-create-update-h2ljt\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.489758 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kbmbt" event={"ID":"6f5dfb94-d484-48d3-ab84-70c647e30d2e","Type":"ContainerDied","Data":"958deb6eed20e0c0b79ed653618d8d8587eb9a153095f3641b83fee86a1ba0b9"} Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.489798 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="958deb6eed20e0c0b79ed653618d8d8587eb9a153095f3641b83fee86a1ba0b9" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.489861 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-kbmbt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.548793 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts\") pod \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\" (UID: \"6f5dfb94-d484-48d3-ab84-70c647e30d2e\") " Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.549284 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts" (OuterVolumeSpecName: "scripts") pod "6f5dfb94-d484-48d3-ab84-70c647e30d2e" (UID: "6f5dfb94-d484-48d3-ab84-70c647e30d2e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.549652 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f5dfb94-d484-48d3-ab84-70c647e30d2e-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.607856 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.854309 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.871623 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"swift-storage-0\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " pod="openstack/swift-storage-0" Jan 29 11:00:08 crc kubenswrapper[4852]: I0129 11:00:08.909946 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 29 11:00:09 crc kubenswrapper[4852]: W0129 11:00:09.089609 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f96a744_12c9_4dd2_908d_d1984b7fccfa.slice/crio-5cc76cc06d45dc23cce100ab490d8bca02c1ee23deacd0678950b530ecb93524 WatchSource:0}: Error finding container 5cc76cc06d45dc23cce100ab490d8bca02c1ee23deacd0678950b530ecb93524: Status 404 returned error can't find the container with id 5cc76cc06d45dc23cce100ab490d8bca02c1ee23deacd0678950b530ecb93524 Jan 29 11:00:09 crc kubenswrapper[4852]: I0129 11:00:09.090820 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-h2ljt"] Jan 29 11:00:09 crc kubenswrapper[4852]: I0129 11:00:09.443428 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 29 11:00:09 crc kubenswrapper[4852]: W0129 11:00:09.450738 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8111fd43_32e9_4654_bf8e_444fbce4933a.slice/crio-cfd5b611022855b6aed693e3fd283814cd3a136d7984ad171086e9af275242c4 WatchSource:0}: Error finding container cfd5b611022855b6aed693e3fd283814cd3a136d7984ad171086e9af275242c4: Status 404 returned error can't find the container with id cfd5b611022855b6aed693e3fd283814cd3a136d7984ad171086e9af275242c4 Jan 29 11:00:09 crc kubenswrapper[4852]: I0129 11:00:09.496512 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"cfd5b611022855b6aed693e3fd283814cd3a136d7984ad171086e9af275242c4"} Jan 29 11:00:09 crc kubenswrapper[4852]: I0129 11:00:09.498106 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-h2ljt" event={"ID":"4f96a744-12c9-4dd2-908d-d1984b7fccfa","Type":"ContainerStarted","Data":"65d76a68381aa7646e3657d86bd34071b3d95cc3ff4545da7a03719e60815981"} Jan 29 11:00:09 crc kubenswrapper[4852]: I0129 11:00:09.498136 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-h2ljt" event={"ID":"4f96a744-12c9-4dd2-908d-d1984b7fccfa","Type":"ContainerStarted","Data":"5cc76cc06d45dc23cce100ab490d8bca02c1ee23deacd0678950b530ecb93524"} Jan 29 11:00:09 crc kubenswrapper[4852]: I0129 11:00:09.518402 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-h2ljt" podStartSLOduration=1.518379759 podStartE2EDuration="1.518379759s" 
podCreationTimestamp="2026-01-29 11:00:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:09.510181409 +0000 UTC m=+1106.727512553" watchObservedRunningTime="2026-01-29 11:00:09.518379759 +0000 UTC m=+1106.735710893" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.398980 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-lrj96"] Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.399948 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.404350 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.404746 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-php2d" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.415454 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lrj96"] Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.483691 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-db-sync-config-data\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.483955 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-combined-ca-bundle\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.483975 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-config-data\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.484101 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jntxm\" (UniqueName: \"kubernetes.io/projected/3370d9c7-8c64-443a-82fe-b03172ce44e4-kube-api-access-jntxm\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.508536 4852 generic.go:334] "Generic (PLEG): container finished" podID="4f96a744-12c9-4dd2-908d-d1984b7fccfa" containerID="65d76a68381aa7646e3657d86bd34071b3d95cc3ff4545da7a03719e60815981" exitCode=0 Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.508593 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-h2ljt" event={"ID":"4f96a744-12c9-4dd2-908d-d1984b7fccfa","Type":"ContainerDied","Data":"65d76a68381aa7646e3657d86bd34071b3d95cc3ff4545da7a03719e60815981"} Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.585687 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jntxm\" (UniqueName: 
\"kubernetes.io/projected/3370d9c7-8c64-443a-82fe-b03172ce44e4-kube-api-access-jntxm\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.585770 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-db-sync-config-data\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.585797 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-combined-ca-bundle\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.585819 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-config-data\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.595060 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-db-sync-config-data\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.600034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-config-data\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.600524 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-combined-ca-bundle\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.605418 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jntxm\" (UniqueName: \"kubernetes.io/projected/3370d9c7-8c64-443a-82fe-b03172ce44e4-kube-api-access-jntxm\") pod \"glance-db-sync-lrj96\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:10 crc kubenswrapper[4852]: I0129 11:00:10.721539 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.275861 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lrj96"] Jan 29 11:00:11 crc kubenswrapper[4852]: W0129 11:00:11.280512 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3370d9c7_8c64_443a_82fe_b03172ce44e4.slice/crio-9e161f383d34c438b013f4e732032cc1412e89bb07cfa702d24ec79bf8cacf78 WatchSource:0}: Error finding container 9e161f383d34c438b013f4e732032cc1412e89bb07cfa702d24ec79bf8cacf78: Status 404 returned error can't find the container with id 9e161f383d34c438b013f4e732032cc1412e89bb07cfa702d24ec79bf8cacf78 Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.532906 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lrj96" event={"ID":"3370d9c7-8c64-443a-82fe-b03172ce44e4","Type":"ContainerStarted","Data":"9e161f383d34c438b013f4e732032cc1412e89bb07cfa702d24ec79bf8cacf78"} Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.537186 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"ac4d599adbc91bf89af64ad4f64d8683dca701a7383cd3db396529b7ca9ceeec"} Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.537218 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"e7228e8da4a774db3b5290c30cdafed9405b8034f11e829f2c3ac803d946e4c3"} Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.537230 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"7d958b8db8a90d2d6554c92f9750a01a932e5397d4686c338494a75d5e717c07"} Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.537241 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"761316244eb0a26173100890d918cc7c1799abc67c0ec48e5c29ebc05dc7ed29"} Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.834774 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.926388 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f96a744-12c9-4dd2-908d-d1984b7fccfa-operator-scripts\") pod \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.926612 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2pfc\" (UniqueName: \"kubernetes.io/projected/4f96a744-12c9-4dd2-908d-d1984b7fccfa-kube-api-access-b2pfc\") pod \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\" (UID: \"4f96a744-12c9-4dd2-908d-d1984b7fccfa\") " Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.929173 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f96a744-12c9-4dd2-908d-d1984b7fccfa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4f96a744-12c9-4dd2-908d-d1984b7fccfa" (UID: "4f96a744-12c9-4dd2-908d-d1984b7fccfa"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:11 crc kubenswrapper[4852]: I0129 11:00:11.936150 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f96a744-12c9-4dd2-908d-d1984b7fccfa-kube-api-access-b2pfc" (OuterVolumeSpecName: "kube-api-access-b2pfc") pod "4f96a744-12c9-4dd2-908d-d1984b7fccfa" (UID: "4f96a744-12c9-4dd2-908d-d1984b7fccfa"). InnerVolumeSpecName "kube-api-access-b2pfc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:12 crc kubenswrapper[4852]: I0129 11:00:12.029706 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f96a744-12c9-4dd2-908d-d1984b7fccfa-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:12 crc kubenswrapper[4852]: I0129 11:00:12.029745 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2pfc\" (UniqueName: \"kubernetes.io/projected/4f96a744-12c9-4dd2-908d-d1984b7fccfa-kube-api-access-b2pfc\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:12 crc kubenswrapper[4852]: I0129 11:00:12.559542 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-h2ljt" event={"ID":"4f96a744-12c9-4dd2-908d-d1984b7fccfa","Type":"ContainerDied","Data":"5cc76cc06d45dc23cce100ab490d8bca02c1ee23deacd0678950b530ecb93524"} Jan 29 11:00:12 crc kubenswrapper[4852]: I0129 11:00:12.559993 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cc76cc06d45dc23cce100ab490d8bca02c1ee23deacd0678950b530ecb93524" Jan 29 11:00:12 crc kubenswrapper[4852]: I0129 11:00:12.559693 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-h2ljt" Jan 29 11:00:14 crc kubenswrapper[4852]: I0129 11:00:14.574897 4852 generic.go:334] "Generic (PLEG): container finished" podID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerID="91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a" exitCode=0 Jan 29 11:00:14 crc kubenswrapper[4852]: I0129 11:00:14.574980 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f84fb26d-e835-4d75-95d5-695b6e033bb7","Type":"ContainerDied","Data":"91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a"} Jan 29 11:00:14 crc kubenswrapper[4852]: I0129 11:00:14.576910 4852 generic.go:334] "Generic (PLEG): container finished" podID="1ab8189f-e95a-47b5-a130-5404901974e2" containerID="c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28" exitCode=0 Jan 29 11:00:14 crc kubenswrapper[4852]: I0129 11:00:14.576942 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1ab8189f-e95a-47b5-a130-5404901974e2","Type":"ContainerDied","Data":"c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28"} Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.328168 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-67sl6" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" probeResult="failure" output=< Jan 29 11:00:15 crc kubenswrapper[4852]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 29 11:00:15 crc kubenswrapper[4852]: > Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.342484 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.351607 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.569917 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-67sl6-config-c4mxd"] Jan 29 11:00:15 crc kubenswrapper[4852]: E0129 11:00:15.570319 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f96a744-12c9-4dd2-908d-d1984b7fccfa" containerName="mariadb-account-create-update" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.570342 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f96a744-12c9-4dd2-908d-d1984b7fccfa" containerName="mariadb-account-create-update" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.570547 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f96a744-12c9-4dd2-908d-d1984b7fccfa" containerName="mariadb-account-create-update" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.571539 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.577550 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.580054 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67sl6-config-c4mxd"] Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.586856 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-additional-scripts\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.586935 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run-ovn\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.586972 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn7dt\" (UniqueName: \"kubernetes.io/projected/0df72737-2e4a-48dd-bf56-c477a2ba30ee-kube-api-access-sn7dt\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.587024 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.587066 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-log-ovn\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: 
\"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.587098 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-scripts\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.589320 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f84fb26d-e835-4d75-95d5-695b6e033bb7","Type":"ContainerStarted","Data":"c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77"} Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.589962 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.594092 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1ab8189f-e95a-47b5-a130-5404901974e2","Type":"ContainerStarted","Data":"f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6"} Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.594599 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.618952 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"91fe6b60918e96d60b8de169eade7e1727fc0ccc381141ea808831c289639ab8"} Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.619359 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"7d276e426302c43e751c21a713a36f854cb3218920539f9fed9da38f45d520ce"} Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.619371 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"118a6805e58988df9a38c6169f10a35d7f949be36a831d95306796e4b1348a45"} Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.631033 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=51.560187192 podStartE2EDuration="1m0.631013011s" podCreationTimestamp="2026-01-29 10:59:15 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.27343021 +0000 UTC m=+1068.490761344" lastFinishedPulling="2026-01-29 10:59:40.344256018 +0000 UTC m=+1077.561587163" observedRunningTime="2026-01-29 11:00:15.624254305 +0000 UTC m=+1112.841585429" watchObservedRunningTime="2026-01-29 11:00:15.631013011 +0000 UTC m=+1112.848344145" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.661433 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=52.017121487 podStartE2EDuration="1m0.661413707s" podCreationTimestamp="2026-01-29 10:59:15 +0000 UTC" firstStartedPulling="2026-01-29 10:59:31.273171814 +0000 UTC m=+1068.490502948" lastFinishedPulling="2026-01-29 10:59:39.917464034 +0000 UTC m=+1077.134795168" observedRunningTime="2026-01-29 11:00:15.657357617 +0000 UTC m=+1112.874688751" watchObservedRunningTime="2026-01-29 
11:00:15.661413707 +0000 UTC m=+1112.878744841" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.688493 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn7dt\" (UniqueName: \"kubernetes.io/projected/0df72737-2e4a-48dd-bf56-c477a2ba30ee-kube-api-access-sn7dt\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.688620 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.688646 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-log-ovn\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.688665 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-scripts\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.688701 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-additional-scripts\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.688769 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run-ovn\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.690698 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.690738 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-log-ovn\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.693711 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-scripts\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " 
pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.694283 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-additional-scripts\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.694623 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run-ovn\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.713837 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn7dt\" (UniqueName: \"kubernetes.io/projected/0df72737-2e4a-48dd-bf56-c477a2ba30ee-kube-api-access-sn7dt\") pod \"ovn-controller-67sl6-config-c4mxd\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:15 crc kubenswrapper[4852]: I0129 11:00:15.903245 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:16 crc kubenswrapper[4852]: I0129 11:00:16.421205 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-67sl6-config-c4mxd"] Jan 29 11:00:16 crc kubenswrapper[4852]: W0129 11:00:16.427921 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0df72737_2e4a_48dd_bf56_c477a2ba30ee.slice/crio-dba1d1c9a969d1ec38da19967874031873013dec450f5826f1ee2905fdb3bcf0 WatchSource:0}: Error finding container dba1d1c9a969d1ec38da19967874031873013dec450f5826f1ee2905fdb3bcf0: Status 404 returned error can't find the container with id dba1d1c9a969d1ec38da19967874031873013dec450f5826f1ee2905fdb3bcf0 Jan 29 11:00:16 crc kubenswrapper[4852]: I0129 11:00:16.635458 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"1da31bf3d6d70a3d5937cf86e2d07be3913f158a9179ef5de6c23c100b7e5517"} Jan 29 11:00:16 crc kubenswrapper[4852]: I0129 11:00:16.641070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6-config-c4mxd" event={"ID":"0df72737-2e4a-48dd-bf56-c477a2ba30ee","Type":"ContainerStarted","Data":"dba1d1c9a969d1ec38da19967874031873013dec450f5826f1ee2905fdb3bcf0"} Jan 29 11:00:17 crc kubenswrapper[4852]: I0129 11:00:17.656642 4852 generic.go:334] "Generic (PLEG): container finished" podID="0df72737-2e4a-48dd-bf56-c477a2ba30ee" containerID="f58ebcfb0f54e201d9318554d9cf3f7b0ed67554f4ba40579db34622110ae1b7" exitCode=0 Jan 29 11:00:17 crc kubenswrapper[4852]: I0129 11:00:17.656707 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6-config-c4mxd" event={"ID":"0df72737-2e4a-48dd-bf56-c477a2ba30ee","Type":"ContainerDied","Data":"f58ebcfb0f54e201d9318554d9cf3f7b0ed67554f4ba40579db34622110ae1b7"} Jan 29 11:00:17 crc kubenswrapper[4852]: I0129 11:00:17.672054 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"45528cdeb850649069e891209d9ca38b5ce8b5d0110cd02108c6b5f6abe281fc"} Jan 29 11:00:17 crc kubenswrapper[4852]: I0129 11:00:17.672104 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"1e2b102b24f601e91c5e39ae16f39b46498693458c4fead72ba2b77aa8d49771"} Jan 29 11:00:17 crc kubenswrapper[4852]: I0129 11:00:17.672118 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"dde3e5fe58352e0cc69a3b45408e08dda15923a3ecec816e83910494c6735af4"} Jan 29 11:00:18 crc kubenswrapper[4852]: I0129 11:00:18.688570 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"2fb8dded90eb3884703a4aa309816c0eff0cdf02427346e9d34e49253bf4d662"} Jan 29 11:00:18 crc kubenswrapper[4852]: I0129 11:00:18.688832 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"907bf322df4410dc6b7ff975343a603805ddab5bdea051b8b9a9717eb895ca80"} Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.818372 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976649 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run\") pod \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976701 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-log-ovn\") pod \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976760 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-additional-scripts\") pod \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976786 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run" (OuterVolumeSpecName: "var-run") pod "0df72737-2e4a-48dd-bf56-c477a2ba30ee" (UID: "0df72737-2e4a-48dd-bf56-c477a2ba30ee"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976833 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "0df72737-2e4a-48dd-bf56-c477a2ba30ee" (UID: "0df72737-2e4a-48dd-bf56-c477a2ba30ee"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976917 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn7dt\" (UniqueName: \"kubernetes.io/projected/0df72737-2e4a-48dd-bf56-c477a2ba30ee-kube-api-access-sn7dt\") pod \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976977 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-scripts\") pod \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.976992 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run-ovn\") pod \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\" (UID: \"0df72737-2e4a-48dd-bf56-c477a2ba30ee\") " Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.977116 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "0df72737-2e4a-48dd-bf56-c477a2ba30ee" (UID: "0df72737-2e4a-48dd-bf56-c477a2ba30ee"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.977310 4852 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.977321 4852 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.977330 4852 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/0df72737-2e4a-48dd-bf56-c477a2ba30ee-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.977486 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "0df72737-2e4a-48dd-bf56-c477a2ba30ee" (UID: "0df72737-2e4a-48dd-bf56-c477a2ba30ee"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.977734 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-scripts" (OuterVolumeSpecName: "scripts") pod "0df72737-2e4a-48dd-bf56-c477a2ba30ee" (UID: "0df72737-2e4a-48dd-bf56-c477a2ba30ee"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:19 crc kubenswrapper[4852]: I0129 11:00:19.982695 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0df72737-2e4a-48dd-bf56-c477a2ba30ee-kube-api-access-sn7dt" (OuterVolumeSpecName: "kube-api-access-sn7dt") pod "0df72737-2e4a-48dd-bf56-c477a2ba30ee" (UID: "0df72737-2e4a-48dd-bf56-c477a2ba30ee"). InnerVolumeSpecName "kube-api-access-sn7dt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.079443 4852 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.079933 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn7dt\" (UniqueName: \"kubernetes.io/projected/0df72737-2e4a-48dd-bf56-c477a2ba30ee-kube-api-access-sn7dt\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.080001 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0df72737-2e4a-48dd-bf56-c477a2ba30ee-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.315783 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-67sl6" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.720644 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"ce1f7c6d351cc99e3313d53f3d8f5133e907d6c87aab097b279a18222b571462"} Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.723889 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6-config-c4mxd" event={"ID":"0df72737-2e4a-48dd-bf56-c477a2ba30ee","Type":"ContainerDied","Data":"dba1d1c9a969d1ec38da19967874031873013dec450f5826f1ee2905fdb3bcf0"} Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.723929 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-67sl6-config-c4mxd" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.723934 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dba1d1c9a969d1ec38da19967874031873013dec450f5826f1ee2905fdb3bcf0" Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.927342 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-67sl6-config-c4mxd"] Jan 29 11:00:20 crc kubenswrapper[4852]: I0129 11:00:20.939639 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-67sl6-config-c4mxd"] Jan 29 11:00:21 crc kubenswrapper[4852]: I0129 11:00:21.474110 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0df72737-2e4a-48dd-bf56-c477a2ba30ee" path="/var/lib/kubelet/pods/0df72737-2e4a-48dd-bf56-c477a2ba30ee/volumes" Jan 29 11:00:26 crc kubenswrapper[4852]: I0129 11:00:26.971958 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.044739 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.353403 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-7sq8p"] Jan 29 11:00:27 crc kubenswrapper[4852]: E0129 11:00:27.353814 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df72737-2e4a-48dd-bf56-c477a2ba30ee" containerName="ovn-config" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.353838 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df72737-2e4a-48dd-bf56-c477a2ba30ee" containerName="ovn-config" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.354017 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df72737-2e4a-48dd-bf56-c477a2ba30ee" containerName="ovn-config" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.354558 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.401522 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7sq8p"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.418074 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5699-account-create-update-wncww"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.419203 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.421394 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.442464 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5699-account-create-update-wncww"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.461147 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-jmbw4"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.462264 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.480904 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-jmbw4"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.499978 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/487b0984-bae1-4730-bfd0-afe920ec974e-operator-scripts\") pod \"cinder-db-create-7sq8p\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.500023 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxqqk\" (UniqueName: \"kubernetes.io/projected/487b0984-bae1-4730-bfd0-afe920ec974e-kube-api-access-fxqqk\") pod \"cinder-db-create-7sq8p\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.500058 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d010e854-5e87-4a27-b194-c3ced771d680-operator-scripts\") pod \"cinder-5699-account-create-update-wncww\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.500113 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmx4z\" (UniqueName: \"kubernetes.io/projected/d010e854-5e87-4a27-b194-c3ced771d680-kube-api-access-dmx4z\") pod \"cinder-5699-account-create-update-wncww\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.572453 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-cd05-account-create-update-27smf"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.573709 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.581635 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.586486 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-cd05-account-create-update-27smf"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.601104 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkpk6\" (UniqueName: \"kubernetes.io/projected/8eb503ba-654f-4997-8581-d2ea619a7b68-kube-api-access-tkpk6\") pod \"barbican-db-create-jmbw4\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.601146 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/487b0984-bae1-4730-bfd0-afe920ec974e-operator-scripts\") pod \"cinder-db-create-7sq8p\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.601166 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxqqk\" (UniqueName: \"kubernetes.io/projected/487b0984-bae1-4730-bfd0-afe920ec974e-kube-api-access-fxqqk\") pod \"cinder-db-create-7sq8p\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.601191 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d010e854-5e87-4a27-b194-c3ced771d680-operator-scripts\") pod \"cinder-5699-account-create-update-wncww\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.601208 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eb503ba-654f-4997-8581-d2ea619a7b68-operator-scripts\") pod \"barbican-db-create-jmbw4\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.601258 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmx4z\" (UniqueName: \"kubernetes.io/projected/d010e854-5e87-4a27-b194-c3ced771d680-kube-api-access-dmx4z\") pod \"cinder-5699-account-create-update-wncww\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.602201 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/487b0984-bae1-4730-bfd0-afe920ec974e-operator-scripts\") pod \"cinder-db-create-7sq8p\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.602417 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d010e854-5e87-4a27-b194-c3ced771d680-operator-scripts\") pod \"cinder-5699-account-create-update-wncww\" (UID: 
\"d010e854-5e87-4a27-b194-c3ced771d680\") " pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.616025 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmx4z\" (UniqueName: \"kubernetes.io/projected/d010e854-5e87-4a27-b194-c3ced771d680-kube-api-access-dmx4z\") pod \"cinder-5699-account-create-update-wncww\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.618207 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxqqk\" (UniqueName: \"kubernetes.io/projected/487b0984-bae1-4730-bfd0-afe920ec974e-kube-api-access-fxqqk\") pod \"cinder-db-create-7sq8p\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.650992 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-jqwsq"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.653777 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.666893 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-jqwsq"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.703602 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkpk6\" (UniqueName: \"kubernetes.io/projected/8eb503ba-654f-4997-8581-d2ea619a7b68-kube-api-access-tkpk6\") pod \"barbican-db-create-jmbw4\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.703705 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eb503ba-654f-4997-8581-d2ea619a7b68-operator-scripts\") pod \"barbican-db-create-jmbw4\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.704070 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trfxw\" (UniqueName: \"kubernetes.io/projected/8a595383-5c9f-4b27-9612-4a1408221623-kube-api-access-trfxw\") pod \"barbican-cd05-account-create-update-27smf\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.704469 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a595383-5c9f-4b27-9612-4a1408221623-operator-scripts\") pod \"barbican-cd05-account-create-update-27smf\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.704671 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eb503ba-654f-4997-8581-d2ea619a7b68-operator-scripts\") pod \"barbican-db-create-jmbw4\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.723893 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-tkpk6\" (UniqueName: \"kubernetes.io/projected/8eb503ba-654f-4997-8581-d2ea619a7b68-kube-api-access-tkpk6\") pod \"barbican-db-create-jmbw4\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.727152 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-78jgw"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.728188 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.731437 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.731637 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.732090 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-99xbg" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.732205 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.738275 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-78jgw"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.774473 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4bcf-account-create-update-g4lz6"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.775863 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.779765 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.790659 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4bcf-account-create-update-g4lz6"] Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.810883 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-operator-scripts\") pod \"neutron-db-create-jqwsq\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.811189 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trfxw\" (UniqueName: \"kubernetes.io/projected/8a595383-5c9f-4b27-9612-4a1408221623-kube-api-access-trfxw\") pod \"barbican-cd05-account-create-update-27smf\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.811321 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a595383-5c9f-4b27-9612-4a1408221623-operator-scripts\") pod \"barbican-cd05-account-create-update-27smf\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.811562 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-6djw2\" (UniqueName: \"kubernetes.io/projected/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-kube-api-access-6djw2\") pod \"neutron-db-create-jqwsq\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.812523 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a595383-5c9f-4b27-9612-4a1408221623-operator-scripts\") pod \"barbican-cd05-account-create-update-27smf\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.823832 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.841312 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.845936 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trfxw\" (UniqueName: \"kubernetes.io/projected/8a595383-5c9f-4b27-9612-4a1408221623-kube-api-access-trfxw\") pod \"barbican-cd05-account-create-update-27smf\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.850335 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.854994 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerStarted","Data":"73859270b6703e319bb738155b60b5da8025987a2cbf2f4800261c79942db2e5"} Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.894149 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.897642 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=29.28763271 podStartE2EDuration="36.897623028s" podCreationTimestamp="2026-01-29 10:59:51 +0000 UTC" firstStartedPulling="2026-01-29 11:00:09.452228546 +0000 UTC m=+1106.669559680" lastFinishedPulling="2026-01-29 11:00:17.062218864 +0000 UTC m=+1114.279549998" observedRunningTime="2026-01-29 11:00:27.893389054 +0000 UTC m=+1125.110720188" watchObservedRunningTime="2026-01-29 11:00:27.897623028 +0000 UTC m=+1125.114954162" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.919484 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/815af2ae-6f87-4b01-9712-43fb3c70f9a7-operator-scripts\") pod \"neutron-4bcf-account-create-update-g4lz6\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.919801 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-config-data\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.919837 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-operator-scripts\") pod \"neutron-db-create-jqwsq\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.919889 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-combined-ca-bundle\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.919944 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j4r9\" (UniqueName: \"kubernetes.io/projected/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-kube-api-access-2j4r9\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.920035 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7spw\" (UniqueName: \"kubernetes.io/projected/815af2ae-6f87-4b01-9712-43fb3c70f9a7-kube-api-access-f7spw\") pod \"neutron-4bcf-account-create-update-g4lz6\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.920057 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6djw2\" (UniqueName: \"kubernetes.io/projected/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-kube-api-access-6djw2\") pod \"neutron-db-create-jqwsq\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " pod="openstack/neutron-db-create-jqwsq" 
Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.920985 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-operator-scripts\") pod \"neutron-db-create-jqwsq\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.946649 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6djw2\" (UniqueName: \"kubernetes.io/projected/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-kube-api-access-6djw2\") pod \"neutron-db-create-jqwsq\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:27 crc kubenswrapper[4852]: I0129 11:00:27.972431 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.040081 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7spw\" (UniqueName: \"kubernetes.io/projected/815af2ae-6f87-4b01-9712-43fb3c70f9a7-kube-api-access-f7spw\") pod \"neutron-4bcf-account-create-update-g4lz6\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.040131 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/815af2ae-6f87-4b01-9712-43fb3c70f9a7-operator-scripts\") pod \"neutron-4bcf-account-create-update-g4lz6\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.042115 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-config-data\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.042176 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-combined-ca-bundle\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.042221 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j4r9\" (UniqueName: \"kubernetes.io/projected/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-kube-api-access-2j4r9\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.041967 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/815af2ae-6f87-4b01-9712-43fb3c70f9a7-operator-scripts\") pod \"neutron-4bcf-account-create-update-g4lz6\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.048141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-combined-ca-bundle\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.059084 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7spw\" (UniqueName: \"kubernetes.io/projected/815af2ae-6f87-4b01-9712-43fb3c70f9a7-kube-api-access-f7spw\") pod \"neutron-4bcf-account-create-update-g4lz6\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.063700 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-config-data\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.071172 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j4r9\" (UniqueName: \"kubernetes.io/projected/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-kube-api-access-2j4r9\") pod \"keystone-db-sync-78jgw\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.122104 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.317018 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-wm7cb"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.333155 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.351048 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-jmbw4"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.351395 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.354135 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.393091 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-wm7cb"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.445221 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7sq8p"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.458188 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5699-account-create-update-wncww"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.459071 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-config\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.459099 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.459120 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.459232 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.459285 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7dg8\" (UniqueName: \"kubernetes.io/projected/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-kube-api-access-g7dg8\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.459316 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.560971 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " 
pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.561061 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7dg8\" (UniqueName: \"kubernetes.io/projected/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-kube-api-access-g7dg8\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.561090 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.561137 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-config\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.561160 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.561185 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.562457 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.572597 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-config\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.572601 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.572681 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.573176 
4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.593906 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7dg8\" (UniqueName: \"kubernetes.io/projected/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-kube-api-access-g7dg8\") pod \"dnsmasq-dns-6d5b6d6b67-wm7cb\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.833265 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-cd05-account-create-update-27smf"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.895043 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.899462 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5699-account-create-update-wncww" event={"ID":"d010e854-5e87-4a27-b194-c3ced771d680","Type":"ContainerStarted","Data":"aaa67bc497ed14030c557d246990a4b25295868ce6b77d9d0fb9b2cd5f846d7e"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.899513 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5699-account-create-update-wncww" event={"ID":"d010e854-5e87-4a27-b194-c3ced771d680","Type":"ContainerStarted","Data":"a899137902958c9682109ae4f5857da99d26200e7bf87497c1174daf5204f21f"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.908919 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7sq8p" event={"ID":"487b0984-bae1-4730-bfd0-afe920ec974e","Type":"ContainerStarted","Data":"028709c4503babddc3871b55ba930b2f3b728ca6b6eab06a923d621095f431b9"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.908963 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7sq8p" event={"ID":"487b0984-bae1-4730-bfd0-afe920ec974e","Type":"ContainerStarted","Data":"9b8fe545c4eda778a9fd82f8fca7fa3b10e7f867abfdf28664bd01ac827a1a97"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.910487 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-cd05-account-create-update-27smf" event={"ID":"8a595383-5c9f-4b27-9612-4a1408221623","Type":"ContainerStarted","Data":"178f1c2eeb6fd08401fed4eb3aa6161545aadca0256078eb0fc0ac4c4941fde3"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.912567 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jmbw4" event={"ID":"8eb503ba-654f-4997-8581-d2ea619a7b68","Type":"ContainerStarted","Data":"618d49b42c8b4f757aea30017d84f7a653b8d140ff6d575dc8f81c48d2cdf5f7"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.912618 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jmbw4" event={"ID":"8eb503ba-654f-4997-8581-d2ea619a7b68","Type":"ContainerStarted","Data":"c09201cfbf1be0835b90438c3f114d91222cd0973446480c56ee977f7c0c7d8c"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.933815 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-5699-account-create-update-wncww" podStartSLOduration=1.933792907 podStartE2EDuration="1.933792907s" 
podCreationTimestamp="2026-01-29 11:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:28.923212018 +0000 UTC m=+1126.140543182" watchObservedRunningTime="2026-01-29 11:00:28.933792907 +0000 UTC m=+1126.151124041" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.942490 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lrj96" event={"ID":"3370d9c7-8c64-443a-82fe-b03172ce44e4","Type":"ContainerStarted","Data":"f85d4051ca659acb4d0c6433794c8cd8c38b06955cbcf113f383525e8fba6fcf"} Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.951332 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4bcf-account-create-update-g4lz6"] Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.957315 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-7sq8p" podStartSLOduration=1.956966826 podStartE2EDuration="1.956966826s" podCreationTimestamp="2026-01-29 11:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:28.938970874 +0000 UTC m=+1126.156301998" watchObservedRunningTime="2026-01-29 11:00:28.956966826 +0000 UTC m=+1126.174297960" Jan 29 11:00:28 crc kubenswrapper[4852]: I0129 11:00:28.993833 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-jmbw4" podStartSLOduration=1.99380751 podStartE2EDuration="1.99380751s" podCreationTimestamp="2026-01-29 11:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:28.959022956 +0000 UTC m=+1126.176354090" watchObservedRunningTime="2026-01-29 11:00:28.99380751 +0000 UTC m=+1126.211138674" Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.007685 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-lrj96" podStartSLOduration=2.917803695 podStartE2EDuration="19.00766692s" podCreationTimestamp="2026-01-29 11:00:10 +0000 UTC" firstStartedPulling="2026-01-29 11:00:11.282864322 +0000 UTC m=+1108.500195466" lastFinishedPulling="2026-01-29 11:00:27.372727557 +0000 UTC m=+1124.590058691" observedRunningTime="2026-01-29 11:00:28.975855169 +0000 UTC m=+1126.193186303" watchObservedRunningTime="2026-01-29 11:00:29.00766692 +0000 UTC m=+1126.224998044" Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.016965 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-78jgw"] Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.076818 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-jqwsq"] Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.514495 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-wm7cb"] Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.952001 4852 generic.go:334] "Generic (PLEG): container finished" podID="8a595383-5c9f-4b27-9612-4a1408221623" containerID="c38cfe0f0915cd81beb71287177a725e2ee7ef8ebc102e10838801dde8803cf8" exitCode=0 Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.952641 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-cd05-account-create-update-27smf" 
event={"ID":"8a595383-5c9f-4b27-9612-4a1408221623","Type":"ContainerDied","Data":"c38cfe0f0915cd81beb71287177a725e2ee7ef8ebc102e10838801dde8803cf8"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.956805 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-78jgw" event={"ID":"4a0e9617-a546-43d8-9ce8-7ff8e22004e8","Type":"ContainerStarted","Data":"db32618a75ea505dd0a85a41114092f3e50819c735a6765fc94f5856d043d6c5"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.958836 4852 generic.go:334] "Generic (PLEG): container finished" podID="8eb503ba-654f-4997-8581-d2ea619a7b68" containerID="618d49b42c8b4f757aea30017d84f7a653b8d140ff6d575dc8f81c48d2cdf5f7" exitCode=0 Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.958879 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jmbw4" event={"ID":"8eb503ba-654f-4997-8581-d2ea619a7b68","Type":"ContainerDied","Data":"618d49b42c8b4f757aea30017d84f7a653b8d140ff6d575dc8f81c48d2cdf5f7"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.964077 4852 generic.go:334] "Generic (PLEG): container finished" podID="815af2ae-6f87-4b01-9712-43fb3c70f9a7" containerID="7379f76291c94d8138e885354e745cb48b5bb3976bd4637906d7a83e5afd89e9" exitCode=0 Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.964124 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4bcf-account-create-update-g4lz6" event={"ID":"815af2ae-6f87-4b01-9712-43fb3c70f9a7","Type":"ContainerDied","Data":"7379f76291c94d8138e885354e745cb48b5bb3976bd4637906d7a83e5afd89e9"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.964142 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4bcf-account-create-update-g4lz6" event={"ID":"815af2ae-6f87-4b01-9712-43fb3c70f9a7","Type":"ContainerStarted","Data":"98bd312affa5d923610c8ce2b25ed2aa55467ed830b8f540b6996cd7d56cc64d"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.965997 4852 generic.go:334] "Generic (PLEG): container finished" podID="9dcddeb0-9a29-43ba-a4a3-50c920d2603f" containerID="70a827e98d10c665afd0a9c14f88cac00a5f0d5fc4a7e56e136ce0c3ffd21760" exitCode=0 Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.966036 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-jqwsq" event={"ID":"9dcddeb0-9a29-43ba-a4a3-50c920d2603f","Type":"ContainerDied","Data":"70a827e98d10c665afd0a9c14f88cac00a5f0d5fc4a7e56e136ce0c3ffd21760"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.966403 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-jqwsq" event={"ID":"9dcddeb0-9a29-43ba-a4a3-50c920d2603f","Type":"ContainerStarted","Data":"2fc7429e1963d8b9ad4d1f018f085915ffe599893a6de9ac7adcc49a17ebe9ec"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.971890 4852 generic.go:334] "Generic (PLEG): container finished" podID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerID="547db02870572018ce399db2a9a2c3e8285391a48dca05a9b1d89ef5d427f1e2" exitCode=0 Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.972054 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" event={"ID":"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4","Type":"ContainerDied","Data":"547db02870572018ce399db2a9a2c3e8285391a48dca05a9b1d89ef5d427f1e2"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.972089 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" 
event={"ID":"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4","Type":"ContainerStarted","Data":"7b8f5c2c9df49f93f9d138392afecd12635087155b03849b7b00ebe5a3b6760b"} Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.977023 4852 generic.go:334] "Generic (PLEG): container finished" podID="d010e854-5e87-4a27-b194-c3ced771d680" containerID="aaa67bc497ed14030c557d246990a4b25295868ce6b77d9d0fb9b2cd5f846d7e" exitCode=0 Jan 29 11:00:29 crc kubenswrapper[4852]: I0129 11:00:29.977179 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5699-account-create-update-wncww" event={"ID":"d010e854-5e87-4a27-b194-c3ced771d680","Type":"ContainerDied","Data":"aaa67bc497ed14030c557d246990a4b25295868ce6b77d9d0fb9b2cd5f846d7e"} Jan 29 11:00:30 crc kubenswrapper[4852]: I0129 11:00:30.006261 4852 generic.go:334] "Generic (PLEG): container finished" podID="487b0984-bae1-4730-bfd0-afe920ec974e" containerID="028709c4503babddc3871b55ba930b2f3b728ca6b6eab06a923d621095f431b9" exitCode=0 Jan 29 11:00:30 crc kubenswrapper[4852]: I0129 11:00:30.007052 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7sq8p" event={"ID":"487b0984-bae1-4730-bfd0-afe920ec974e","Type":"ContainerDied","Data":"028709c4503babddc3871b55ba930b2f3b728ca6b6eab06a923d621095f431b9"} Jan 29 11:00:30 crc kubenswrapper[4852]: I0129 11:00:30.019671 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:00:30 crc kubenswrapper[4852]: I0129 11:00:30.019721 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:00:31 crc kubenswrapper[4852]: I0129 11:00:31.018283 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" event={"ID":"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4","Type":"ContainerStarted","Data":"746f1dd9de205fcb69bb9ffeae3ec14fe781bcf2e7c81382b54f3b30ebbced42"} Jan 29 11:00:31 crc kubenswrapper[4852]: I0129 11:00:31.043102 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" podStartSLOduration=3.043082402 podStartE2EDuration="3.043082402s" podCreationTimestamp="2026-01-29 11:00:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:31.035457794 +0000 UTC m=+1128.252788938" watchObservedRunningTime="2026-01-29 11:00:31.043082402 +0000 UTC m=+1128.260413546" Jan 29 11:00:32 crc kubenswrapper[4852]: I0129 11:00:32.025516 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:33 crc kubenswrapper[4852]: I0129 11:00:33.896360 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:33 crc kubenswrapper[4852]: I0129 11:00:33.902615 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:33 crc kubenswrapper[4852]: I0129 11:00:33.943462 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:33 crc kubenswrapper[4852]: I0129 11:00:33.949221 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:33 crc kubenswrapper[4852]: I0129 11:00:33.966594 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:33 crc kubenswrapper[4852]: I0129 11:00:33.974469 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.004647 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d010e854-5e87-4a27-b194-c3ced771d680-operator-scripts\") pod \"d010e854-5e87-4a27-b194-c3ced771d680\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.004757 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eb503ba-654f-4997-8581-d2ea619a7b68-operator-scripts\") pod \"8eb503ba-654f-4997-8581-d2ea619a7b68\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006117 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/487b0984-bae1-4730-bfd0-afe920ec974e-operator-scripts\") pod \"487b0984-bae1-4730-bfd0-afe920ec974e\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006152 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/815af2ae-6f87-4b01-9712-43fb3c70f9a7-operator-scripts\") pod \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006231 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trfxw\" (UniqueName: \"kubernetes.io/projected/8a595383-5c9f-4b27-9612-4a1408221623-kube-api-access-trfxw\") pod \"8a595383-5c9f-4b27-9612-4a1408221623\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006264 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7spw\" (UniqueName: \"kubernetes.io/projected/815af2ae-6f87-4b01-9712-43fb3c70f9a7-kube-api-access-f7spw\") pod \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\" (UID: \"815af2ae-6f87-4b01-9712-43fb3c70f9a7\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006315 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a595383-5c9f-4b27-9612-4a1408221623-operator-scripts\") pod \"8a595383-5c9f-4b27-9612-4a1408221623\" (UID: \"8a595383-5c9f-4b27-9612-4a1408221623\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006351 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkpk6\" 
(UniqueName: \"kubernetes.io/projected/8eb503ba-654f-4997-8581-d2ea619a7b68-kube-api-access-tkpk6\") pod \"8eb503ba-654f-4997-8581-d2ea619a7b68\" (UID: \"8eb503ba-654f-4997-8581-d2ea619a7b68\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006388 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmx4z\" (UniqueName: \"kubernetes.io/projected/d010e854-5e87-4a27-b194-c3ced771d680-kube-api-access-dmx4z\") pod \"d010e854-5e87-4a27-b194-c3ced771d680\" (UID: \"d010e854-5e87-4a27-b194-c3ced771d680\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.006409 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxqqk\" (UniqueName: \"kubernetes.io/projected/487b0984-bae1-4730-bfd0-afe920ec974e-kube-api-access-fxqqk\") pod \"487b0984-bae1-4730-bfd0-afe920ec974e\" (UID: \"487b0984-bae1-4730-bfd0-afe920ec974e\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.009606 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/815af2ae-6f87-4b01-9712-43fb3c70f9a7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "815af2ae-6f87-4b01-9712-43fb3c70f9a7" (UID: "815af2ae-6f87-4b01-9712-43fb3c70f9a7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.010024 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a595383-5c9f-4b27-9612-4a1408221623-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a595383-5c9f-4b27-9612-4a1408221623" (UID: "8a595383-5c9f-4b27-9612-4a1408221623"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.010543 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d010e854-5e87-4a27-b194-c3ced771d680-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d010e854-5e87-4a27-b194-c3ced771d680" (UID: "d010e854-5e87-4a27-b194-c3ced771d680"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.011190 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8eb503ba-654f-4997-8581-d2ea619a7b68-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8eb503ba-654f-4997-8581-d2ea619a7b68" (UID: "8eb503ba-654f-4997-8581-d2ea619a7b68"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.011729 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/487b0984-bae1-4730-bfd0-afe920ec974e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "487b0984-bae1-4730-bfd0-afe920ec974e" (UID: "487b0984-bae1-4730-bfd0-afe920ec974e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.014767 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a595383-5c9f-4b27-9612-4a1408221623-kube-api-access-trfxw" (OuterVolumeSpecName: "kube-api-access-trfxw") pod "8a595383-5c9f-4b27-9612-4a1408221623" (UID: "8a595383-5c9f-4b27-9612-4a1408221623"). 
InnerVolumeSpecName "kube-api-access-trfxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.015183 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/487b0984-bae1-4730-bfd0-afe920ec974e-kube-api-access-fxqqk" (OuterVolumeSpecName: "kube-api-access-fxqqk") pod "487b0984-bae1-4730-bfd0-afe920ec974e" (UID: "487b0984-bae1-4730-bfd0-afe920ec974e"). InnerVolumeSpecName "kube-api-access-fxqqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.017806 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815af2ae-6f87-4b01-9712-43fb3c70f9a7-kube-api-access-f7spw" (OuterVolumeSpecName: "kube-api-access-f7spw") pod "815af2ae-6f87-4b01-9712-43fb3c70f9a7" (UID: "815af2ae-6f87-4b01-9712-43fb3c70f9a7"). InnerVolumeSpecName "kube-api-access-f7spw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.018926 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eb503ba-654f-4997-8581-d2ea619a7b68-kube-api-access-tkpk6" (OuterVolumeSpecName: "kube-api-access-tkpk6") pod "8eb503ba-654f-4997-8581-d2ea619a7b68" (UID: "8eb503ba-654f-4997-8581-d2ea619a7b68"). InnerVolumeSpecName "kube-api-access-tkpk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.019452 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d010e854-5e87-4a27-b194-c3ced771d680-kube-api-access-dmx4z" (OuterVolumeSpecName: "kube-api-access-dmx4z") pod "d010e854-5e87-4a27-b194-c3ced771d680" (UID: "d010e854-5e87-4a27-b194-c3ced771d680"). InnerVolumeSpecName "kube-api-access-dmx4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.043430 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7sq8p" event={"ID":"487b0984-bae1-4730-bfd0-afe920ec974e","Type":"ContainerDied","Data":"9b8fe545c4eda778a9fd82f8fca7fa3b10e7f867abfdf28664bd01ac827a1a97"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.043712 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b8fe545c4eda778a9fd82f8fca7fa3b10e7f867abfdf28664bd01ac827a1a97" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.043940 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7sq8p" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.046241 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-cd05-account-create-update-27smf" event={"ID":"8a595383-5c9f-4b27-9612-4a1408221623","Type":"ContainerDied","Data":"178f1c2eeb6fd08401fed4eb3aa6161545aadca0256078eb0fc0ac4c4941fde3"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.046270 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="178f1c2eeb6fd08401fed4eb3aa6161545aadca0256078eb0fc0ac4c4941fde3" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.046312 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-cd05-account-create-update-27smf" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.054895 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-78jgw" event={"ID":"4a0e9617-a546-43d8-9ce8-7ff8e22004e8","Type":"ContainerStarted","Data":"3cb74d83906176b3368d7874d4dc52bac2342a925e464506e720f7daf2fd6b77"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.058112 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jmbw4" event={"ID":"8eb503ba-654f-4997-8581-d2ea619a7b68","Type":"ContainerDied","Data":"c09201cfbf1be0835b90438c3f114d91222cd0973446480c56ee977f7c0c7d8c"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.058149 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c09201cfbf1be0835b90438c3f114d91222cd0973446480c56ee977f7c0c7d8c" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.058185 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jmbw4" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.059990 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4bcf-account-create-update-g4lz6" event={"ID":"815af2ae-6f87-4b01-9712-43fb3c70f9a7","Type":"ContainerDied","Data":"98bd312affa5d923610c8ce2b25ed2aa55467ed830b8f540b6996cd7d56cc64d"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.060048 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98bd312affa5d923610c8ce2b25ed2aa55467ed830b8f540b6996cd7d56cc64d" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.060138 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-g4lz6" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.061656 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-jqwsq" event={"ID":"9dcddeb0-9a29-43ba-a4a3-50c920d2603f","Type":"ContainerDied","Data":"2fc7429e1963d8b9ad4d1f018f085915ffe599893a6de9ac7adcc49a17ebe9ec"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.061687 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2fc7429e1963d8b9ad4d1f018f085915ffe599893a6de9ac7adcc49a17ebe9ec" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.061739 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-jqwsq" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.064120 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5699-account-create-update-wncww" event={"ID":"d010e854-5e87-4a27-b194-c3ced771d680","Type":"ContainerDied","Data":"a899137902958c9682109ae4f5857da99d26200e7bf87497c1174daf5204f21f"} Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.064279 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a899137902958c9682109ae4f5857da99d26200e7bf87497c1174daf5204f21f" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.064239 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5699-account-create-update-wncww" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.074422 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-78jgw" podStartSLOduration=2.405510695 podStartE2EDuration="7.074406004s" podCreationTimestamp="2026-01-29 11:00:27 +0000 UTC" firstStartedPulling="2026-01-29 11:00:29.071987969 +0000 UTC m=+1126.289319103" lastFinishedPulling="2026-01-29 11:00:33.740883268 +0000 UTC m=+1130.958214412" observedRunningTime="2026-01-29 11:00:34.072336063 +0000 UTC m=+1131.289667207" watchObservedRunningTime="2026-01-29 11:00:34.074406004 +0000 UTC m=+1131.291737138" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.107871 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6djw2\" (UniqueName: \"kubernetes.io/projected/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-kube-api-access-6djw2\") pod \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108088 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-operator-scripts\") pod \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\" (UID: \"9dcddeb0-9a29-43ba-a4a3-50c920d2603f\") " Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108494 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d010e854-5e87-4a27-b194-c3ced771d680-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108513 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eb503ba-654f-4997-8581-d2ea619a7b68-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108524 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/487b0984-bae1-4730-bfd0-afe920ec974e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108535 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/815af2ae-6f87-4b01-9712-43fb3c70f9a7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108546 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trfxw\" (UniqueName: \"kubernetes.io/projected/8a595383-5c9f-4b27-9612-4a1408221623-kube-api-access-trfxw\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108558 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7spw\" (UniqueName: \"kubernetes.io/projected/815af2ae-6f87-4b01-9712-43fb3c70f9a7-kube-api-access-f7spw\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108570 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a595383-5c9f-4b27-9612-4a1408221623-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108597 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkpk6\" (UniqueName: 
\"kubernetes.io/projected/8eb503ba-654f-4997-8581-d2ea619a7b68-kube-api-access-tkpk6\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108609 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmx4z\" (UniqueName: \"kubernetes.io/projected/d010e854-5e87-4a27-b194-c3ced771d680-kube-api-access-dmx4z\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.108619 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxqqk\" (UniqueName: \"kubernetes.io/projected/487b0984-bae1-4730-bfd0-afe920ec974e-kube-api-access-fxqqk\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.109254 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9dcddeb0-9a29-43ba-a4a3-50c920d2603f" (UID: "9dcddeb0-9a29-43ba-a4a3-50c920d2603f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.114756 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-kube-api-access-6djw2" (OuterVolumeSpecName: "kube-api-access-6djw2") pod "9dcddeb0-9a29-43ba-a4a3-50c920d2603f" (UID: "9dcddeb0-9a29-43ba-a4a3-50c920d2603f"). InnerVolumeSpecName "kube-api-access-6djw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.210521 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:34 crc kubenswrapper[4852]: I0129 11:00:34.210820 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6djw2\" (UniqueName: \"kubernetes.io/projected/9dcddeb0-9a29-43ba-a4a3-50c920d2603f-kube-api-access-6djw2\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:37 crc kubenswrapper[4852]: I0129 11:00:37.104078 4852 generic.go:334] "Generic (PLEG): container finished" podID="4a0e9617-a546-43d8-9ce8-7ff8e22004e8" containerID="3cb74d83906176b3368d7874d4dc52bac2342a925e464506e720f7daf2fd6b77" exitCode=0 Jan 29 11:00:37 crc kubenswrapper[4852]: I0129 11:00:37.104211 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-78jgw" event={"ID":"4a0e9617-a546-43d8-9ce8-7ff8e22004e8","Type":"ContainerDied","Data":"3cb74d83906176b3368d7874d4dc52bac2342a925e464506e720f7daf2fd6b77"} Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.119074 4852 generic.go:334] "Generic (PLEG): container finished" podID="3370d9c7-8c64-443a-82fe-b03172ce44e4" containerID="f85d4051ca659acb4d0c6433794c8cd8c38b06955cbcf113f383525e8fba6fcf" exitCode=0 Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.119335 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lrj96" event={"ID":"3370d9c7-8c64-443a-82fe-b03172ce44e4","Type":"ContainerDied","Data":"f85d4051ca659acb4d0c6433794c8cd8c38b06955cbcf113f383525e8fba6fcf"} Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.439798 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.486531 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-config-data\") pod \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.486743 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-combined-ca-bundle\") pod \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.486780 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2j4r9\" (UniqueName: \"kubernetes.io/projected/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-kube-api-access-2j4r9\") pod \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\" (UID: \"4a0e9617-a546-43d8-9ce8-7ff8e22004e8\") " Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.506167 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-kube-api-access-2j4r9" (OuterVolumeSpecName: "kube-api-access-2j4r9") pod "4a0e9617-a546-43d8-9ce8-7ff8e22004e8" (UID: "4a0e9617-a546-43d8-9ce8-7ff8e22004e8"). InnerVolumeSpecName "kube-api-access-2j4r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.525404 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a0e9617-a546-43d8-9ce8-7ff8e22004e8" (UID: "4a0e9617-a546-43d8-9ce8-7ff8e22004e8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.551593 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-config-data" (OuterVolumeSpecName: "config-data") pod "4a0e9617-a546-43d8-9ce8-7ff8e22004e8" (UID: "4a0e9617-a546-43d8-9ce8-7ff8e22004e8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.590118 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.590171 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2j4r9\" (UniqueName: \"kubernetes.io/projected/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-kube-api-access-2j4r9\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.590186 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a0e9617-a546-43d8-9ce8-7ff8e22004e8-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.896993 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.971805 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-stv2s"] Jan 29 11:00:38 crc kubenswrapper[4852]: I0129 11:00:38.972121 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerName="dnsmasq-dns" containerID="cri-o://22be95aa4a2fc2e72e0861a1e906afc3e8e80e69d7c30301af918bf96fc492e0" gracePeriod=10 Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.128665 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-78jgw" event={"ID":"4a0e9617-a546-43d8-9ce8-7ff8e22004e8","Type":"ContainerDied","Data":"db32618a75ea505dd0a85a41114092f3e50819c735a6765fc94f5856d043d6c5"} Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.128706 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db32618a75ea505dd0a85a41114092f3e50819c735a6765fc94f5856d043d6c5" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.128712 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-78jgw" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.131764 4852 generic.go:334] "Generic (PLEG): container finished" podID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerID="22be95aa4a2fc2e72e0861a1e906afc3e8e80e69d7c30301af918bf96fc492e0" exitCode=0 Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.131959 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" event={"ID":"6257f62d-458f-44ac-abcb-d04f4d6119fa","Type":"ContainerDied","Data":"22be95aa4a2fc2e72e0861a1e906afc3e8e80e69d7c30301af918bf96fc492e0"} Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.297521 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-j6fb8"] Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.298092 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="487b0984-bae1-4730-bfd0-afe920ec974e" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.298108 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="487b0984-bae1-4730-bfd0-afe920ec974e" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.298120 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dcddeb0-9a29-43ba-a4a3-50c920d2603f" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.298127 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dcddeb0-9a29-43ba-a4a3-50c920d2603f" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.298142 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="815af2ae-6f87-4b01-9712-43fb3c70f9a7" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.298148 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="815af2ae-6f87-4b01-9712-43fb3c70f9a7" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.298159 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a595383-5c9f-4b27-9612-4a1408221623" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.298165 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a595383-5c9f-4b27-9612-4a1408221623" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.298176 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eb503ba-654f-4997-8581-d2ea619a7b68" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.298181 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eb503ba-654f-4997-8581-d2ea619a7b68" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.298201 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d010e854-5e87-4a27-b194-c3ced771d680" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.298206 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d010e854-5e87-4a27-b194-c3ced771d680" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.303154 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a0e9617-a546-43d8-9ce8-7ff8e22004e8" containerName="keystone-db-sync" Jan 29 11:00:39 crc kubenswrapper[4852]: 
I0129 11:00:39.303192 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a0e9617-a546-43d8-9ce8-7ff8e22004e8" containerName="keystone-db-sync" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303500 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a0e9617-a546-43d8-9ce8-7ff8e22004e8" containerName="keystone-db-sync" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303537 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="815af2ae-6f87-4b01-9712-43fb3c70f9a7" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303560 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dcddeb0-9a29-43ba-a4a3-50c920d2603f" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303597 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a595383-5c9f-4b27-9612-4a1408221623" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303613 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eb503ba-654f-4997-8581-d2ea619a7b68" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303621 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d010e854-5e87-4a27-b194-c3ced771d680" containerName="mariadb-account-create-update" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.303639 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="487b0984-bae1-4730-bfd0-afe920ec974e" containerName="mariadb-database-create" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.305386 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.314194 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-j6fb8"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.365120 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-rxnbd"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.366136 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.371276 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.371725 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.371880 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.371986 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-99xbg" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.372094 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.374389 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-rxnbd"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.411441 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.411520 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54zvx\" (UniqueName: \"kubernetes.io/projected/6d842333-99df-4e89-84cf-4880a8fb7b7b-kube-api-access-54zvx\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.411548 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.411593 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-config\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.411611 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.411626 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.512661 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.512718 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-fernet-keys\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.512752 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb7cb\" (UniqueName: \"kubernetes.io/projected/b6d771e3-3ed1-4943-901a-306b6464ead7-kube-api-access-zb7cb\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.512773 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54zvx\" (UniqueName: \"kubernetes.io/projected/6d842333-99df-4e89-84cf-4880a8fb7b7b-kube-api-access-54zvx\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.512792 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-credential-keys\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513166 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-scripts\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513191 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513230 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-config\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513245 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513262 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513323 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-combined-ca-bundle\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513357 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-config-data\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.513703 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.514104 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.514245 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.514785 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-config\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.515293 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.546637 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-9kkxs"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.547649 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.560280 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-ljsz5" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.560603 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.560717 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.578718 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-9kkxs"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.585374 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.594880 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54zvx\" (UniqueName: \"kubernetes.io/projected/6d842333-99df-4e89-84cf-4880a8fb7b7b-kube-api-access-54zvx\") pod \"dnsmasq-dns-6f8c45789f-j6fb8\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.601756 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-n2g46"] Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.602912 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerName="init" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.602930 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerName="init" Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.602964 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerName="dnsmasq-dns" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.602972 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerName="dnsmasq-dns" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.603207 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" containerName="dnsmasq-dns" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.603786 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.611042 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qtjdx" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.611347 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.611544 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618334 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nxz5\" (UniqueName: \"kubernetes.io/projected/88726a03-9dc5-49b5-b4cc-60b521b51d61-kube-api-access-9nxz5\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618371 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-config-data\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618402 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-combined-ca-bundle\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618426 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-config-data\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618452 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-db-sync-config-data\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618492 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618511 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-fernet-keys\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618535 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-scripts\") pod 
\"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618558 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb7cb\" (UniqueName: \"kubernetes.io/projected/b6d771e3-3ed1-4943-901a-306b6464ead7-kube-api-access-zb7cb\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618574 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-credential-keys\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618619 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-scripts\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.618643 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88726a03-9dc5-49b5-b4cc-60b521b51d61-etc-machine-id\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.626941 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-fernet-keys\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.631117 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.633073 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-config-data\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.651745 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-credential-keys\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.654512 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-combined-ca-bundle\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.656274 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-scripts\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.695096 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb7cb\" (UniqueName: \"kubernetes.io/projected/b6d771e3-3ed1-4943-901a-306b6464ead7-kube-api-access-zb7cb\") pod \"keystone-bootstrap-rxnbd\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.698852 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-n2g46"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.719802 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-nb\") pod \"6257f62d-458f-44ac-abcb-d04f4d6119fa\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.719910 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-sb\") pod \"6257f62d-458f-44ac-abcb-d04f4d6119fa\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.719958 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzc6j\" (UniqueName: \"kubernetes.io/projected/6257f62d-458f-44ac-abcb-d04f4d6119fa-kube-api-access-kzc6j\") pod \"6257f62d-458f-44ac-abcb-d04f4d6119fa\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.719977 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-config\") pod \"6257f62d-458f-44ac-abcb-d04f4d6119fa\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 
11:00:39.720080 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-dns-svc\") pod \"6257f62d-458f-44ac-abcb-d04f4d6119fa\" (UID: \"6257f62d-458f-44ac-abcb-d04f4d6119fa\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720277 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720319 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-scripts\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720360 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88726a03-9dc5-49b5-b4cc-60b521b51d61-etc-machine-id\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720404 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nxz5\" (UniqueName: \"kubernetes.io/projected/88726a03-9dc5-49b5-b4cc-60b521b51d61-kube-api-access-9nxz5\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720427 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-config-data\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720457 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-config\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720482 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-db-sync-config-data\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpfsx\" (UniqueName: \"kubernetes.io/projected/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-kube-api-access-wpfsx\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.720518 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-combined-ca-bundle\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.736507 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88726a03-9dc5-49b5-b4cc-60b521b51d61-etc-machine-id\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.737727 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-config-data\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.751540 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.764145 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-db-sync-config-data\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.764779 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6257f62d-458f-44ac-abcb-d04f4d6119fa-kube-api-access-kzc6j" (OuterVolumeSpecName: "kube-api-access-kzc6j") pod "6257f62d-458f-44ac-abcb-d04f4d6119fa" (UID: "6257f62d-458f-44ac-abcb-d04f4d6119fa"). InnerVolumeSpecName "kube-api-access-kzc6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.765231 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-scripts\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.774079 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.782167 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nxz5\" (UniqueName: \"kubernetes.io/projected/88726a03-9dc5-49b5-b4cc-60b521b51d61-kube-api-access-9nxz5\") pod \"cinder-db-sync-9kkxs\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.820504 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vn9vq"] Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.822807 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.824417 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.825136 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mhnlh" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.825297 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.825433 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpfsx\" (UniqueName: \"kubernetes.io/projected/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-kube-api-access-wpfsx\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.825477 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-combined-ca-bundle\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.825978 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-config\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.826042 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzc6j\" (UniqueName: \"kubernetes.io/projected/6257f62d-458f-44ac-abcb-d04f4d6119fa-kube-api-access-kzc6j\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.837949 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-combined-ca-bundle\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.842148 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.845321 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-config\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.859520 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpfsx\" (UniqueName: \"kubernetes.io/projected/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-kube-api-access-wpfsx\") pod \"neutron-db-sync-n2g46\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.865960 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6257f62d-458f-44ac-abcb-d04f4d6119fa" (UID: "6257f62d-458f-44ac-abcb-d04f4d6119fa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.866884 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-config" (OuterVolumeSpecName: "config") pod "6257f62d-458f-44ac-abcb-d04f4d6119fa" (UID: "6257f62d-458f-44ac-abcb-d04f4d6119fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.885883 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6257f62d-458f-44ac-abcb-d04f4d6119fa" (UID: "6257f62d-458f-44ac-abcb-d04f4d6119fa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.923179 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6257f62d-458f-44ac-abcb-d04f4d6119fa" (UID: "6257f62d-458f-44ac-abcb-d04f4d6119fa"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.933494 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-combined-ca-bundle\") pod \"3370d9c7-8c64-443a-82fe-b03172ce44e4\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.933606 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-db-sync-config-data\") pod \"3370d9c7-8c64-443a-82fe-b03172ce44e4\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.933625 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-config-data\") pod \"3370d9c7-8c64-443a-82fe-b03172ce44e4\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.933784 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jntxm\" (UniqueName: \"kubernetes.io/projected/3370d9c7-8c64-443a-82fe-b03172ce44e4-kube-api-access-jntxm\") pod \"3370d9c7-8c64-443a-82fe-b03172ce44e4\" (UID: \"3370d9c7-8c64-443a-82fe-b03172ce44e4\") " Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934026 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvrrg\" (UniqueName: \"kubernetes.io/projected/d5ef08b5-420f-46cd-bc10-f021836fd6ee-kube-api-access-wvrrg\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934056 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-config-data\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934085 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-scripts\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934111 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-combined-ca-bundle\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934133 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5ef08b5-420f-46cd-bc10-f021836fd6ee-logs\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934180 4852 reconciler_common.go:293] "Volume 
detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934190 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934200 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.934207 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6257f62d-458f-44ac-abcb-d04f4d6119fa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.935683 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-pxxmr"] Jan 29 11:00:39 crc kubenswrapper[4852]: E0129 11:00:39.936047 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3370d9c7-8c64-443a-82fe-b03172ce44e4" containerName="glance-db-sync" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.936058 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3370d9c7-8c64-443a-82fe-b03172ce44e4" containerName="glance-db-sync" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.936229 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3370d9c7-8c64-443a-82fe-b03172ce44e4" containerName="glance-db-sync" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.936771 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.944125 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-ftd4b" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.944318 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.984768 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3370d9c7-8c64-443a-82fe-b03172ce44e4" (UID: "3370d9c7-8c64-443a-82fe-b03172ce44e4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.986791 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3370d9c7-8c64-443a-82fe-b03172ce44e4-kube-api-access-jntxm" (OuterVolumeSpecName: "kube-api-access-jntxm") pod "3370d9c7-8c64-443a-82fe-b03172ce44e4" (UID: "3370d9c7-8c64-443a-82fe-b03172ce44e4"). InnerVolumeSpecName "kube-api-access-jntxm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:39 crc kubenswrapper[4852]: I0129 11:00:39.994730 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pxxmr"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.007699 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3370d9c7-8c64-443a-82fe-b03172ce44e4" (UID: "3370d9c7-8c64-443a-82fe-b03172ce44e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.025081 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.036831 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-combined-ca-bundle\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.036917 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvrrg\" (UniqueName: \"kubernetes.io/projected/d5ef08b5-420f-46cd-bc10-f021836fd6ee-kube-api-access-wvrrg\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.036969 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-config-data\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.036987 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-scripts\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.037016 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-combined-ca-bundle\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.037041 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5ef08b5-420f-46cd-bc10-f021836fd6ee-logs\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.037067 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv5h9\" (UniqueName: \"kubernetes.io/projected/08ba6d45-1a5b-4131-baae-76160239df48-kube-api-access-mv5h9\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc 
kubenswrapper[4852]: I0129 11:00:40.037104 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-db-sync-config-data\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.037144 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jntxm\" (UniqueName: \"kubernetes.io/projected/3370d9c7-8c64-443a-82fe-b03172ce44e4-kube-api-access-jntxm\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.037154 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.037165 4852 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.039121 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5ef08b5-420f-46cd-bc10-f021836fd6ee-logs\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.051008 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vn9vq"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.052819 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-combined-ca-bundle\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.053983 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-config-data\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.054645 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-scripts\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.060146 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-j6fb8"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.067762 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvrrg\" (UniqueName: \"kubernetes.io/projected/d5ef08b5-420f-46cd-bc10-f021836fd6ee-kube-api-access-wvrrg\") pod \"placement-db-sync-vn9vq\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.073463 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-kmgnn"] Jan 29 11:00:40 crc 
kubenswrapper[4852]: I0129 11:00:40.081809 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-n2g46" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.087697 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.126158 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-kmgnn"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.131782 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-config-data" (OuterVolumeSpecName: "config-data") pod "3370d9c7-8c64-443a-82fe-b03172ce44e4" (UID: "3370d9c7-8c64-443a-82fe-b03172ce44e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.139904 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfc45\" (UniqueName: \"kubernetes.io/projected/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-kube-api-access-gfc45\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140119 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" event={"ID":"6257f62d-458f-44ac-abcb-d04f4d6119fa","Type":"ContainerDied","Data":"4f45ae8c39e03cfac84ba62dc7709338ed4d303b867445c8189436879abebc10"} Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140172 4852 scope.go:117] "RemoveContainer" containerID="22be95aa4a2fc2e72e0861a1e906afc3e8e80e69d7c30301af918bf96fc492e0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140280 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-stv2s" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140406 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140613 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140690 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-config\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv5h9\" (UniqueName: \"kubernetes.io/projected/08ba6d45-1a5b-4131-baae-76160239df48-kube-api-access-mv5h9\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.140890 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-db-sync-config-data\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.141003 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.141083 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-combined-ca-bundle\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.141194 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3370d9c7-8c64-443a-82fe-b03172ce44e4-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 
11:00:40.146034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-combined-ca-bundle\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.146786 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-db-sync-config-data\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.148325 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lrj96" event={"ID":"3370d9c7-8c64-443a-82fe-b03172ce44e4","Type":"ContainerDied","Data":"9e161f383d34c438b013f4e732032cc1412e89bb07cfa702d24ec79bf8cacf78"} Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.148414 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e161f383d34c438b013f4e732032cc1412e89bb07cfa702d24ec79bf8cacf78" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.148526 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lrj96" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.158856 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.161759 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv5h9\" (UniqueName: \"kubernetes.io/projected/08ba6d45-1a5b-4131-baae-76160239df48-kube-api-access-mv5h9\") pod \"barbican-db-sync-pxxmr\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.185637 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.185743 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.193765 4852 scope.go:117] "RemoveContainer" containerID="bf6ed541cda46af4ef834cbab569c19b1408ee781c6ca3eaa57ea916f87cbe92" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.194973 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.195169 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.217742 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-vn9vq" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242632 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-config\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242729 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nhtx\" (UniqueName: \"kubernetes.io/projected/f988dca9-b8dd-406e-b316-d27052f43c80-kube-api-access-9nhtx\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242750 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-config-data\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242805 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-log-httpd\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242831 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242853 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242904 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfc45\" (UniqueName: \"kubernetes.io/projected/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-kube-api-access-gfc45\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242923 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242958 
4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-scripts\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.242979 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.243011 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-run-httpd\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.243035 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.243951 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.244563 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.244885 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-config\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.245320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.245745 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.258898 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-stv2s"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.262726 4852 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.263031 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfc45\" (UniqueName: \"kubernetes.io/projected/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-kube-api-access-gfc45\") pod \"dnsmasq-dns-fcfdd6f9f-kmgnn\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.270177 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-stv2s"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.344695 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nhtx\" (UniqueName: \"kubernetes.io/projected/f988dca9-b8dd-406e-b316-d27052f43c80-kube-api-access-9nhtx\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.344967 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-config-data\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.345021 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-log-httpd\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.345044 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.345084 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.345114 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-scripts\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.345137 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-run-httpd\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.345528 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-run-httpd\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.352844 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-config-data\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.359079 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-scripts\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.362457 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-log-httpd\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.364367 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.369124 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nhtx\" (UniqueName: \"kubernetes.io/projected/f988dca9-b8dd-406e-b316-d27052f43c80-kube-api-access-9nhtx\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.384249 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.387406 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-j6fb8"] Jan 29 11:00:40 crc kubenswrapper[4852]: W0129 11:00:40.399184 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d842333_99df_4e89_84cf_4880a8fb7b7b.slice/crio-347879db946943c35d6e266b8c2dc2ac29b11cfde667f228ace1656c3a41a0e9 WatchSource:0}: Error finding container 347879db946943c35d6e266b8c2dc2ac29b11cfde667f228ace1656c3a41a0e9: Status 404 returned error can't find the container with id 347879db946943c35d6e266b8c2dc2ac29b11cfde667f228ace1656c3a41a0e9 Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.435445 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.523868 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.571604 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-9kkxs"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.588438 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-rxnbd"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.653324 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-kmgnn"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.705625 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-t9jrt"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.707212 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.758268 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-t9jrt"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.759371 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-config\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.759516 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.759680 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mndxh\" (UniqueName: \"kubernetes.io/projected/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-kube-api-access-mndxh\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.759869 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.760107 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.760254 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.848225 4852 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-n2g46"] Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.868077 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.868135 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-config\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.868194 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.868221 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mndxh\" (UniqueName: \"kubernetes.io/projected/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-kube-api-access-mndxh\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.868245 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.868266 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.869014 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.869340 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.870659 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 
11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.871782 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.872194 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-config\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.891713 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mndxh\" (UniqueName: \"kubernetes.io/projected/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-kube-api-access-mndxh\") pod \"dnsmasq-dns-57c957c4ff-t9jrt\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:40 crc kubenswrapper[4852]: I0129 11:00:40.983208 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vn9vq"] Jan 29 11:00:40 crc kubenswrapper[4852]: W0129 11:00:40.998429 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5ef08b5_420f_46cd_bc10_f021836fd6ee.slice/crio-8dc1dd2d2ec78504ada1f78026222d3e808b40b2c2e1208b0666e7143e69eb80 WatchSource:0}: Error finding container 8dc1dd2d2ec78504ada1f78026222d3e808b40b2c2e1208b0666e7143e69eb80: Status 404 returned error can't find the container with id 8dc1dd2d2ec78504ada1f78026222d3e808b40b2c2e1208b0666e7143e69eb80 Jan 29 11:00:41 crc kubenswrapper[4852]: I0129 11:00:41.049933 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:00:41 crc kubenswrapper[4852]: I0129 11:00:41.086826 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pxxmr"] Jan 29 11:00:41 crc kubenswrapper[4852]: I0129 11:00:41.171100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pxxmr" event={"ID":"08ba6d45-1a5b-4131-baae-76160239df48","Type":"ContainerStarted","Data":"53b92605876f248bf9a727df66912072064dc9d107ecff1da958c85874137da6"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.181011 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-kmgnn"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.183338 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n2g46" event={"ID":"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f","Type":"ContainerStarted","Data":"cfc48cd686d7714ca17560f6be7658f618fc2eb927f3f91dac0d4e3c37a9cb42"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.183367 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n2g46" event={"ID":"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f","Type":"ContainerStarted","Data":"b97de7f6ec320143eb91f474e51a7086436b2d7a3b7194ceae48dcd8cfbe2718"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.202063 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-n2g46" podStartSLOduration=2.202041054 podStartE2EDuration="2.202041054s" podCreationTimestamp="2026-01-29 11:00:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:41.201622524 +0000 UTC m=+1138.418953648" watchObservedRunningTime="2026-01-29 11:00:41.202041054 +0000 UTC m=+1138.419372198" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.204771 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rxnbd" event={"ID":"b6d771e3-3ed1-4943-901a-306b6464ead7","Type":"ContainerStarted","Data":"5b404d0a771ad698caf04956b2e2d5bcaa400691da0f35ce44408eb4b17c9d32"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.204809 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rxnbd" event={"ID":"b6d771e3-3ed1-4943-901a-306b6464ead7","Type":"ContainerStarted","Data":"f2e4dbe5dfdedd7d3333819689b5905e614bc30b8346c5c25d34bdb59b58c721"} Jan 29 11:00:43 crc kubenswrapper[4852]: W0129 11:00:41.219699 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07acf8fc_2a7e_4ce9_8deb_fde14fb5976a.slice/crio-abe7066aed8b2ee54354fa14d036f68fd87f30557c3362288b2f22d9d6c2b332 WatchSource:0}: Error finding container abe7066aed8b2ee54354fa14d036f68fd87f30557c3362288b2f22d9d6c2b332: Status 404 returned error can't find the container with id abe7066aed8b2ee54354fa14d036f68fd87f30557c3362288b2f22d9d6c2b332 Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.230764 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-rxnbd" podStartSLOduration=2.230741159 podStartE2EDuration="2.230741159s" podCreationTimestamp="2026-01-29 11:00:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:41.225790917 +0000 UTC m=+1138.443122061" 
watchObservedRunningTime="2026-01-29 11:00:41.230741159 +0000 UTC m=+1138.448072293" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.231436 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9kkxs" event={"ID":"88726a03-9dc5-49b5-b4cc-60b521b51d61","Type":"ContainerStarted","Data":"810adc495248c520a873e86ec26e443fb508609ceed54c8e342c7f9b54e48a9e"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.244157 4852 generic.go:334] "Generic (PLEG): container finished" podID="6d842333-99df-4e89-84cf-4880a8fb7b7b" containerID="1129f84972fdff8811d8d96bee3348a6ca74ccf9632721cc7597ef7abb9906b1" exitCode=0 Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.244638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" event={"ID":"6d842333-99df-4e89-84cf-4880a8fb7b7b","Type":"ContainerDied","Data":"1129f84972fdff8811d8d96bee3348a6ca74ccf9632721cc7597ef7abb9906b1"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.244661 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" event={"ID":"6d842333-99df-4e89-84cf-4880a8fb7b7b","Type":"ContainerStarted","Data":"347879db946943c35d6e266b8c2dc2ac29b11cfde667f228ace1656c3a41a0e9"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.257520 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vn9vq" event={"ID":"d5ef08b5-420f-46cd-bc10-f021836fd6ee","Type":"ContainerStarted","Data":"8dc1dd2d2ec78504ada1f78026222d3e808b40b2c2e1208b0666e7143e69eb80"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.319800 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: W0129 11:00:41.328470 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf988dca9_b8dd_406e_b316_d27052f43c80.slice/crio-e4028ccc4c654e6cf43e72ee673999be03e16430749cc61f57d452db9f477cee WatchSource:0}: Error finding container e4028ccc4c654e6cf43e72ee673999be03e16430749cc61f57d452db9f477cee: Status 404 returned error can't find the container with id e4028ccc4c654e6cf43e72ee673999be03e16430749cc61f57d452db9f477cee Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.478164 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6257f62d-458f-44ac-abcb-d04f4d6119fa" path="/var/lib/kubelet/pods/6257f62d-458f-44ac-abcb-d04f4d6119fa/volumes" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.520282 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.522013 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.531570 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.531833 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-php2d" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.532043 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.564975 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583203 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htpdv\" (UniqueName: \"kubernetes.io/projected/07faeb84-d0de-4e4d-b9a8-108527fc24d2-kube-api-access-htpdv\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583323 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583431 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-logs\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583537 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583653 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583802 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-config-data\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.583923 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-scripts\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " 
pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.685913 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.686731 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-logs\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.686846 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.686926 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.687019 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-config-data\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.687111 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-scripts\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.687171 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htpdv\" (UniqueName: \"kubernetes.io/projected/07faeb84-d0de-4e4d-b9a8-108527fc24d2-kube-api-access-htpdv\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.687271 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-logs\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.687435 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.688824 
4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.696157 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-scripts\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.699922 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-config-data\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.714241 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.718752 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htpdv\" (UniqueName: \"kubernetes.io/projected/07faeb84-d0de-4e4d-b9a8-108527fc24d2-kube-api-access-htpdv\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.726051 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.783902 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.785758 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.799709 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.801126 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.856671 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905150 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnkcz\" (UniqueName: \"kubernetes.io/projected/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-kube-api-access-jnkcz\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905218 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905267 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905312 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905335 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-logs\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905352 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:41.905406 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006754 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006825 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006857 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-logs\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006883 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006931 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.006985 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnkcz\" (UniqueName: \"kubernetes.io/projected/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-kube-api-access-jnkcz\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.008036 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.040185 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.084777 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-logs\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.137298 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 
11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.142196 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.142273 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnkcz\" (UniqueName: \"kubernetes.io/projected/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-kube-api-access-jnkcz\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.145064 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.145948 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.272959 4852 generic.go:334] "Generic (PLEG): container finished" podID="07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" containerID="8cbf155995c2ede00a5cf056efa1d418b05d4027fc6796c72e7656576c9ad36d" exitCode=0 Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.273033 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" event={"ID":"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a","Type":"ContainerDied","Data":"8cbf155995c2ede00a5cf056efa1d418b05d4027fc6796c72e7656576c9ad36d"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.273088 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" event={"ID":"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a","Type":"ContainerStarted","Data":"abe7066aed8b2ee54354fa14d036f68fd87f30557c3362288b2f22d9d6c2b332"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.274591 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerStarted","Data":"e4028ccc4c654e6cf43e72ee673999be03e16430749cc61f57d452db9f477cee"} Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.428752 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:42.961359 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.034533 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.347795 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.615228 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.643874 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.693803 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-svc\") pod \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.694101 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-sb\") pod \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.694216 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfc45\" (UniqueName: \"kubernetes.io/projected/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-kube-api-access-gfc45\") pod \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.694283 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-config\") pod \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.694325 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-nb\") pod \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.694388 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-swift-storage-0\") pod \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\" (UID: \"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.718524 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-t9jrt"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.719257 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-kube-api-access-gfc45" (OuterVolumeSpecName: "kube-api-access-gfc45") pod "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" (UID: "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a"). InnerVolumeSpecName "kube-api-access-gfc45". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.738633 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" (UID: "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.741068 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.741144 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfc45\" (UniqueName: \"kubernetes.io/projected/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-kube-api-access-gfc45\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.779300 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" (UID: "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.780562 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" (UID: "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: W0129 11:00:43.817263 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c6cf90c_43e5_44e0_aacf_c92b343a72a2.slice/crio-952f44ba01c85fb396f9b7cb4a0d3ed47ddcc5693f0f0f506035e3aa03a71006 WatchSource:0}: Error finding container 952f44ba01c85fb396f9b7cb4a0d3ed47ddcc5693f0f0f506035e3aa03a71006: Status 404 returned error can't find the container with id 952f44ba01c85fb396f9b7cb4a0d3ed47ddcc5693f0f0f506035e3aa03a71006 Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.822756 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" (UID: "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.824342 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-config" (OuterVolumeSpecName: "config") pod "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" (UID: "07acf8fc-2a7e-4ce9-8deb-fde14fb5976a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843142 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-sb\") pod \"6d842333-99df-4e89-84cf-4880a8fb7b7b\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843192 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-config\") pod \"6d842333-99df-4e89-84cf-4880a8fb7b7b\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843211 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-nb\") pod \"6d842333-99df-4e89-84cf-4880a8fb7b7b\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843242 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-swift-storage-0\") pod \"6d842333-99df-4e89-84cf-4880a8fb7b7b\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843269 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54zvx\" (UniqueName: \"kubernetes.io/projected/6d842333-99df-4e89-84cf-4880a8fb7b7b-kube-api-access-54zvx\") pod \"6d842333-99df-4e89-84cf-4880a8fb7b7b\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843377 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-svc\") pod \"6d842333-99df-4e89-84cf-4880a8fb7b7b\" (UID: \"6d842333-99df-4e89-84cf-4880a8fb7b7b\") " Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843786 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843802 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843812 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.843824 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.859616 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d842333-99df-4e89-84cf-4880a8fb7b7b-kube-api-access-54zvx" (OuterVolumeSpecName: "kube-api-access-54zvx") pod "6d842333-99df-4e89-84cf-4880a8fb7b7b" (UID: 
"6d842333-99df-4e89-84cf-4880a8fb7b7b"). InnerVolumeSpecName "kube-api-access-54zvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.878345 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d842333-99df-4e89-84cf-4880a8fb7b7b" (UID: "6d842333-99df-4e89-84cf-4880a8fb7b7b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.897997 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-config" (OuterVolumeSpecName: "config") pod "6d842333-99df-4e89-84cf-4880a8fb7b7b" (UID: "6d842333-99df-4e89-84cf-4880a8fb7b7b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.899606 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6d842333-99df-4e89-84cf-4880a8fb7b7b" (UID: "6d842333-99df-4e89-84cf-4880a8fb7b7b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.907388 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6d842333-99df-4e89-84cf-4880a8fb7b7b" (UID: "6d842333-99df-4e89-84cf-4880a8fb7b7b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.914051 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6d842333-99df-4e89-84cf-4880a8fb7b7b" (UID: "6d842333-99df-4e89-84cf-4880a8fb7b7b"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.927663 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.947628 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.947667 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.947676 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.947686 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.947699 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54zvx\" (UniqueName: \"kubernetes.io/projected/6d842333-99df-4e89-84cf-4880a8fb7b7b-kube-api-access-54zvx\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:43 crc kubenswrapper[4852]: I0129 11:00:43.947714 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d842333-99df-4e89-84cf-4880a8fb7b7b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.295558 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0c6cf90c-43e5-44e0-aacf-c92b343a72a2","Type":"ContainerStarted","Data":"952f44ba01c85fb396f9b7cb4a0d3ed47ddcc5693f0f0f506035e3aa03a71006"} Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.296922 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" event={"ID":"33e33319-f0fa-4c80-af5f-47a6fc4e7e23","Type":"ContainerStarted","Data":"807f30cc49ba546b7def8ca35bfcc32f22f0aa8bb86221cb243042013937c9e2"} Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.299110 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" event={"ID":"6d842333-99df-4e89-84cf-4880a8fb7b7b","Type":"ContainerDied","Data":"347879db946943c35d6e266b8c2dc2ac29b11cfde667f228ace1656c3a41a0e9"} Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.299152 4852 scope.go:117] "RemoveContainer" containerID="1129f84972fdff8811d8d96bee3348a6ca74ccf9632721cc7597ef7abb9906b1" Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.300245 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-j6fb8" Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.301131 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" event={"ID":"07acf8fc-2a7e-4ce9-8deb-fde14fb5976a","Type":"ContainerDied","Data":"abe7066aed8b2ee54354fa14d036f68fd87f30557c3362288b2f22d9d6c2b332"} Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.301187 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-kmgnn" Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.383077 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-kmgnn"] Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.405450 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-kmgnn"] Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.431802 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-j6fb8"] Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.436146 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-j6fb8"] Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.533181 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:00:44 crc kubenswrapper[4852]: I0129 11:00:44.780437 4852 scope.go:117] "RemoveContainer" containerID="8cbf155995c2ede00a5cf056efa1d418b05d4027fc6796c72e7656576c9ad36d" Jan 29 11:00:44 crc kubenswrapper[4852]: W0129 11:00:44.788701 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07faeb84_d0de_4e4d_b9a8_108527fc24d2.slice/crio-a2cbb091ffa7de73435d0adeeb67014bd3fe2ebfeeb3af76c13826cb09f549aa WatchSource:0}: Error finding container a2cbb091ffa7de73435d0adeeb67014bd3fe2ebfeeb3af76c13826cb09f549aa: Status 404 returned error can't find the container with id a2cbb091ffa7de73435d0adeeb67014bd3fe2ebfeeb3af76c13826cb09f549aa Jan 29 11:00:45 crc kubenswrapper[4852]: I0129 11:00:45.334784 4852 generic.go:334] "Generic (PLEG): container finished" podID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerID="4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227" exitCode=0 Jan 29 11:00:45 crc kubenswrapper[4852]: I0129 11:00:45.334857 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" event={"ID":"33e33319-f0fa-4c80-af5f-47a6fc4e7e23","Type":"ContainerDied","Data":"4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227"} Jan 29 11:00:45 crc kubenswrapper[4852]: I0129 11:00:45.369805 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07faeb84-d0de-4e4d-b9a8-108527fc24d2","Type":"ContainerStarted","Data":"a2cbb091ffa7de73435d0adeeb67014bd3fe2ebfeeb3af76c13826cb09f549aa"} Jan 29 11:00:45 crc kubenswrapper[4852]: I0129 11:00:45.388322 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0c6cf90c-43e5-44e0-aacf-c92b343a72a2","Type":"ContainerStarted","Data":"6aece293d8335c3ef591a685d40052f025c4ccbd2769ceae9c14f91295a717c9"} Jan 29 11:00:45 crc kubenswrapper[4852]: I0129 11:00:45.477474 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" 
path="/var/lib/kubelet/pods/07acf8fc-2a7e-4ce9-8deb-fde14fb5976a/volumes" Jan 29 11:00:45 crc kubenswrapper[4852]: I0129 11:00:45.478147 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d842333-99df-4e89-84cf-4880a8fb7b7b" path="/var/lib/kubelet/pods/6d842333-99df-4e89-84cf-4880a8fb7b7b/volumes" Jan 29 11:00:46 crc kubenswrapper[4852]: I0129 11:00:46.404426 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0c6cf90c-43e5-44e0-aacf-c92b343a72a2","Type":"ContainerStarted","Data":"d540c8bc726d6598eb67cf65884a8b9d36d942af68841cbcca64f50a9ce4b88e"} Jan 29 11:00:46 crc kubenswrapper[4852]: I0129 11:00:46.405881 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-log" containerID="cri-o://6aece293d8335c3ef591a685d40052f025c4ccbd2769ceae9c14f91295a717c9" gracePeriod=30 Jan 29 11:00:46 crc kubenswrapper[4852]: I0129 11:00:46.405973 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-httpd" containerID="cri-o://d540c8bc726d6598eb67cf65884a8b9d36d942af68841cbcca64f50a9ce4b88e" gracePeriod=30 Jan 29 11:00:46 crc kubenswrapper[4852]: I0129 11:00:46.418135 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07faeb84-d0de-4e4d-b9a8-108527fc24d2","Type":"ContainerStarted","Data":"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d"} Jan 29 11:00:46 crc kubenswrapper[4852]: I0129 11:00:46.438423 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.438406621 podStartE2EDuration="6.438406621s" podCreationTimestamp="2026-01-29 11:00:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:00:46.438163565 +0000 UTC m=+1143.655494729" watchObservedRunningTime="2026-01-29 11:00:46.438406621 +0000 UTC m=+1143.655737755" Jan 29 11:00:47 crc kubenswrapper[4852]: I0129 11:00:47.429613 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rxnbd" event={"ID":"b6d771e3-3ed1-4943-901a-306b6464ead7","Type":"ContainerDied","Data":"5b404d0a771ad698caf04956b2e2d5bcaa400691da0f35ce44408eb4b17c9d32"} Jan 29 11:00:47 crc kubenswrapper[4852]: I0129 11:00:47.429614 4852 generic.go:334] "Generic (PLEG): container finished" podID="b6d771e3-3ed1-4943-901a-306b6464ead7" containerID="5b404d0a771ad698caf04956b2e2d5bcaa400691da0f35ce44408eb4b17c9d32" exitCode=0 Jan 29 11:00:47 crc kubenswrapper[4852]: I0129 11:00:47.432169 4852 generic.go:334] "Generic (PLEG): container finished" podID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerID="d540c8bc726d6598eb67cf65884a8b9d36d942af68841cbcca64f50a9ce4b88e" exitCode=0 Jan 29 11:00:47 crc kubenswrapper[4852]: I0129 11:00:47.432200 4852 generic.go:334] "Generic (PLEG): container finished" podID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerID="6aece293d8335c3ef591a685d40052f025c4ccbd2769ceae9c14f91295a717c9" exitCode=143 Jan 29 11:00:47 crc kubenswrapper[4852]: I0129 11:00:47.432220 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"0c6cf90c-43e5-44e0-aacf-c92b343a72a2","Type":"ContainerDied","Data":"d540c8bc726d6598eb67cf65884a8b9d36d942af68841cbcca64f50a9ce4b88e"} Jan 29 11:00:47 crc kubenswrapper[4852]: I0129 11:00:47.432245 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0c6cf90c-43e5-44e0-aacf-c92b343a72a2","Type":"ContainerDied","Data":"6aece293d8335c3ef591a685d40052f025c4ccbd2769ceae9c14f91295a717c9"} Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.007908 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.008695 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wvrrg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-vn9vq_openstack(d5ef08b5-420f-46cd-bc10-f021836fd6ee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.010081 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-vn9vq" podUID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 
11:00:55.117486 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.123663 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258161 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-combined-ca-bundle\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258252 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-scripts\") pod \"b6d771e3-3ed1-4943-901a-306b6464ead7\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-config-data\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258385 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-config-data\") pod \"b6d771e3-3ed1-4943-901a-306b6464ead7\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258406 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-scripts\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258429 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-combined-ca-bundle\") pod \"b6d771e3-3ed1-4943-901a-306b6464ead7\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258448 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-httpd-run\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258469 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258496 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb7cb\" (UniqueName: \"kubernetes.io/projected/b6d771e3-3ed1-4943-901a-306b6464ead7-kube-api-access-zb7cb\") pod \"b6d771e3-3ed1-4943-901a-306b6464ead7\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258532 4852 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-logs\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258553 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-fernet-keys\") pod \"b6d771e3-3ed1-4943-901a-306b6464ead7\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258601 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-credential-keys\") pod \"b6d771e3-3ed1-4943-901a-306b6464ead7\" (UID: \"b6d771e3-3ed1-4943-901a-306b6464ead7\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.258622 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnkcz\" (UniqueName: \"kubernetes.io/projected/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-kube-api-access-jnkcz\") pod \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\" (UID: \"0c6cf90c-43e5-44e0-aacf-c92b343a72a2\") " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.260238 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.260513 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-logs" (OuterVolumeSpecName: "logs") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.265305 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-scripts" (OuterVolumeSpecName: "scripts") pod "b6d771e3-3ed1-4943-901a-306b6464ead7" (UID: "b6d771e3-3ed1-4943-901a-306b6464ead7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.265361 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b6d771e3-3ed1-4943-901a-306b6464ead7" (UID: "b6d771e3-3ed1-4943-901a-306b6464ead7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.266558 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b6d771e3-3ed1-4943-901a-306b6464ead7" (UID: "b6d771e3-3ed1-4943-901a-306b6464ead7"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.267176 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.269998 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-kube-api-access-jnkcz" (OuterVolumeSpecName: "kube-api-access-jnkcz") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "kube-api-access-jnkcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.275089 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6d771e3-3ed1-4943-901a-306b6464ead7-kube-api-access-zb7cb" (OuterVolumeSpecName: "kube-api-access-zb7cb") pod "b6d771e3-3ed1-4943-901a-306b6464ead7" (UID: "b6d771e3-3ed1-4943-901a-306b6464ead7"). InnerVolumeSpecName "kube-api-access-zb7cb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.280120 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-scripts" (OuterVolumeSpecName: "scripts") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.290998 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.296204 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6d771e3-3ed1-4943-901a-306b6464ead7" (UID: "b6d771e3-3ed1-4943-901a-306b6464ead7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.307116 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-config-data" (OuterVolumeSpecName: "config-data") pod "b6d771e3-3ed1-4943-901a-306b6464ead7" (UID: "b6d771e3-3ed1-4943-901a-306b6464ead7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.318021 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-config-data" (OuterVolumeSpecName: "config-data") pod "0c6cf90c-43e5-44e0-aacf-c92b343a72a2" (UID: "0c6cf90c-43e5-44e0-aacf-c92b343a72a2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361056 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361093 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361106 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361117 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361129 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361144 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361180 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361201 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zb7cb\" (UniqueName: \"kubernetes.io/projected/b6d771e3-3ed1-4943-901a-306b6464ead7-kube-api-access-zb7cb\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361215 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361225 4852 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361235 4852 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6d771e3-3ed1-4943-901a-306b6464ead7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361247 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnkcz\" (UniqueName: \"kubernetes.io/projected/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-kube-api-access-jnkcz\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.361258 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6cf90c-43e5-44e0-aacf-c92b343a72a2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.383150 4852 
operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.462952 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.517358 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rxnbd" event={"ID":"b6d771e3-3ed1-4943-901a-306b6464ead7","Type":"ContainerDied","Data":"f2e4dbe5dfdedd7d3333819689b5905e614bc30b8346c5c25d34bdb59b58c721"} Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.517398 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2e4dbe5dfdedd7d3333819689b5905e614bc30b8346c5c25d34bdb59b58c721" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.517445 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rxnbd" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.521844 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0c6cf90c-43e5-44e0-aacf-c92b343a72a2","Type":"ContainerDied","Data":"952f44ba01c85fb396f9b7cb4a0d3ed47ddcc5693f0f0f506035e3aa03a71006"} Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.521914 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.521915 4852 scope.go:117] "RemoveContainer" containerID="d540c8bc726d6598eb67cf65884a8b9d36d942af68841cbcca64f50a9ce4b88e" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.523178 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-vn9vq" podUID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.564627 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.571828 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.593307 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.593776 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" containerName="init" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.593799 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" containerName="init" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.593819 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d771e3-3ed1-4943-901a-306b6464ead7" containerName="keystone-bootstrap" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.593827 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d771e3-3ed1-4943-901a-306b6464ead7" containerName="keystone-bootstrap" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.593845 4852 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-httpd" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.593852 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-httpd" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.593876 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-log" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.593883 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-log" Jan 29 11:00:55 crc kubenswrapper[4852]: E0129 11:00:55.593908 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d842333-99df-4e89-84cf-4880a8fb7b7b" containerName="init" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.593916 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d842333-99df-4e89-84cf-4880a8fb7b7b" containerName="init" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.594074 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-httpd" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.594091 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6d771e3-3ed1-4943-901a-306b6464ead7" containerName="keystone-bootstrap" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.594107 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" containerName="glance-log" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.594124 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d842333-99df-4e89-84cf-4880a8fb7b7b" containerName="init" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.594136 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="07acf8fc-2a7e-4ce9-8deb-fde14fb5976a" containerName="init" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.595011 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.597942 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.600929 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.607687 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.789714 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.790269 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.790444 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.790782 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.791593 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-logs\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.791785 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjm5t\" (UniqueName: \"kubernetes.io/projected/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-kube-api-access-qjm5t\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.791934 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.792103 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.893762 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.893822 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.893856 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.893897 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.893950 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-logs\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.893990 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjm5t\" (UniqueName: \"kubernetes.io/projected/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-kube-api-access-qjm5t\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.894018 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.894039 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.894521 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.894752 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.895093 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-logs\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.903282 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.907511 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.908460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.910767 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.929290 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjm5t\" (UniqueName: \"kubernetes.io/projected/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-kube-api-access-qjm5t\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:55 crc kubenswrapper[4852]: I0129 11:00:55.965975 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.196672 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-rxnbd"] Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.204762 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-rxnbd"] 
Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.219941 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.297304 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-jxll9"] Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.299171 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.301086 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.304605 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.305971 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-99xbg" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.306138 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.306275 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.308064 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jxll9"] Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.403135 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-scripts\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.403235 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-combined-ca-bundle\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.403270 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-config-data\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.403287 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-fernet-keys\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.403308 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-credential-keys\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.403629 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbbct\" (UniqueName: \"kubernetes.io/projected/716ef279-3244-4dd1-8ae4-a0b17c4d119e-kube-api-access-cbbct\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.505622 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-credential-keys\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.505754 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbbct\" (UniqueName: \"kubernetes.io/projected/716ef279-3244-4dd1-8ae4-a0b17c4d119e-kube-api-access-cbbct\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.505788 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-scripts\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.505897 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-combined-ca-bundle\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.505945 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-config-data\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.505966 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-fernet-keys\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.510201 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-config-data\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.510933 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-scripts\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.512932 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-credential-keys\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.513157 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-fernet-keys\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.513346 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-combined-ca-bundle\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.522222 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbbct\" (UniqueName: \"kubernetes.io/projected/716ef279-3244-4dd1-8ae4-a0b17c4d119e-kube-api-access-cbbct\") pod \"keystone-bootstrap-jxll9\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:56 crc kubenswrapper[4852]: I0129 11:00:56.624571 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:00:57 crc kubenswrapper[4852]: I0129 11:00:57.473884 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c6cf90c-43e5-44e0-aacf-c92b343a72a2" path="/var/lib/kubelet/pods/0c6cf90c-43e5-44e0-aacf-c92b343a72a2/volumes" Jan 29 11:00:57 crc kubenswrapper[4852]: I0129 11:00:57.474980 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6d771e3-3ed1-4943-901a-306b6464ead7" path="/var/lib/kubelet/pods/b6d771e3-3ed1-4943-901a-306b6464ead7/volumes" Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.017401 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.017759 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.017808 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.018547 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d25938c544c94cb7ff57505e6e76ac88750fccb2f6818b7dc821d1e097f62ced"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.018622 4852 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://d25938c544c94cb7ff57505e6e76ac88750fccb2f6818b7dc821d1e097f62ced" gracePeriod=600 Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.563652 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="d25938c544c94cb7ff57505e6e76ac88750fccb2f6818b7dc821d1e097f62ced" exitCode=0 Jan 29 11:01:00 crc kubenswrapper[4852]: I0129 11:01:00.563691 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"d25938c544c94cb7ff57505e6e76ac88750fccb2f6818b7dc821d1e097f62ced"} Jan 29 11:01:03 crc kubenswrapper[4852]: I0129 11:01:03.935649 4852 scope.go:117] "RemoveContainer" containerID="6aece293d8335c3ef591a685d40052f025c4ccbd2769ceae9c14f91295a717c9" Jan 29 11:01:03 crc kubenswrapper[4852]: E0129 11:01:03.944943 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 29 11:01:03 crc kubenswrapper[4852]: E0129 11:01:03.945102 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9nxz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,Se
ccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-9kkxs_openstack(88726a03-9dc5-49b5-b4cc-60b521b51d61): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 11:01:03 crc kubenswrapper[4852]: E0129 11:01:03.946387 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-9kkxs" podUID="88726a03-9dc5-49b5-b4cc-60b521b51d61" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.132524 4852 scope.go:117] "RemoveContainer" containerID="fb2ed3c4caa5478d63bcb1710ebf19f9b201d62d528f176bfc9d19e4065c39e0" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.383997 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jxll9"] Jan 29 11:01:04 crc kubenswrapper[4852]: W0129 11:01:04.392288 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod716ef279_3244_4dd1_8ae4_a0b17c4d119e.slice/crio-c87559f455f9862855c32cfc0433f02e075e63c7630d199682544120f0299efc WatchSource:0}: Error finding container c87559f455f9862855c32cfc0433f02e075e63c7630d199682544120f0299efc: Status 404 returned error can't find the container with id c87559f455f9862855c32cfc0433f02e075e63c7630d199682544120f0299efc Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.563595 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:01:04 crc kubenswrapper[4852]: W0129 11:01:04.568054 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca037db5_f99e_4aa3_9725_68ac7fce0bf8.slice/crio-87111d58a02619facf35301fbfb476e198cf5647b0ab4f8c56a34cd15946f223 WatchSource:0}: Error finding container 87111d58a02619facf35301fbfb476e198cf5647b0ab4f8c56a34cd15946f223: Status 404 returned error can't find the container with id 87111d58a02619facf35301fbfb476e198cf5647b0ab4f8c56a34cd15946f223 Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.618369 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerStarted","Data":"b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.626360 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" event={"ID":"33e33319-f0fa-4c80-af5f-47a6fc4e7e23","Type":"ContainerStarted","Data":"7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.627119 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.627886 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ca037db5-f99e-4aa3-9725-68ac7fce0bf8","Type":"ContainerStarted","Data":"87111d58a02619facf35301fbfb476e198cf5647b0ab4f8c56a34cd15946f223"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.632232 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07faeb84-d0de-4e4d-b9a8-108527fc24d2","Type":"ContainerStarted","Data":"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.632672 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-log" containerID="cri-o://9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d" gracePeriod=30 Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.632691 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-httpd" containerID="cri-o://a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104" gracePeriod=30 Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.636198 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jxll9" event={"ID":"716ef279-3244-4dd1-8ae4-a0b17c4d119e","Type":"ContainerStarted","Data":"d105533ecc823dceee85f7dd1ee79ff7664e044b9219ccc2db75fb06d4b37611"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.636249 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jxll9" event={"ID":"716ef279-3244-4dd1-8ae4-a0b17c4d119e","Type":"ContainerStarted","Data":"c87559f455f9862855c32cfc0433f02e075e63c7630d199682544120f0299efc"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.641487 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pxxmr" event={"ID":"08ba6d45-1a5b-4131-baae-76160239df48","Type":"ContainerStarted","Data":"1f00cf17358b6899eebd1eaba2bbf4f8b138d64c109c97c9a622904c07013291"} Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.648723 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"d7c3f62d61ba87d851460d7d26afa6e186fa6847967e9524e9452f3e890a1087"} Jan 29 11:01:04 crc kubenswrapper[4852]: E0129 11:01:04.651499 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-9kkxs" podUID="88726a03-9dc5-49b5-b4cc-60b521b51d61" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.654039 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" podStartSLOduration=24.654024304 podStartE2EDuration="24.654024304s" podCreationTimestamp="2026-01-29 11:00:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:04.643674891 +0000 UTC m=+1161.861006025" watchObservedRunningTime="2026-01-29 11:01:04.654024304 +0000 UTC m=+1161.871355438" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.673094 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-pxxmr" podStartSLOduration=2.8721229790000002 podStartE2EDuration="25.673078242s" podCreationTimestamp="2026-01-29 11:00:39 +0000 UTC" firstStartedPulling="2026-01-29 11:00:41.117644983 
+0000 UTC m=+1138.334976107" lastFinishedPulling="2026-01-29 11:01:03.918600236 +0000 UTC m=+1161.135931370" observedRunningTime="2026-01-29 11:01:04.666548922 +0000 UTC m=+1161.883880056" watchObservedRunningTime="2026-01-29 11:01:04.673078242 +0000 UTC m=+1161.890409376" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.709995 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=24.709972127 podStartE2EDuration="24.709972127s" podCreationTimestamp="2026-01-29 11:00:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:04.691412522 +0000 UTC m=+1161.908743676" watchObservedRunningTime="2026-01-29 11:01:04.709972127 +0000 UTC m=+1161.927303281" Jan 29 11:01:04 crc kubenswrapper[4852]: I0129 11:01:04.718233 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-jxll9" podStartSLOduration=8.718207519 podStartE2EDuration="8.718207519s" podCreationTimestamp="2026-01-29 11:00:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:04.711702749 +0000 UTC m=+1161.929033893" watchObservedRunningTime="2026-01-29 11:01:04.718207519 +0000 UTC m=+1161.935538653" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.220558 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.363194 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-combined-ca-bundle\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.363892 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-scripts\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.363939 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-config-data\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.363974 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htpdv\" (UniqueName: \"kubernetes.io/projected/07faeb84-d0de-4e4d-b9a8-108527fc24d2-kube-api-access-htpdv\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.364011 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-httpd-run\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.364043 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-logs\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.364184 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\" (UID: \"07faeb84-d0de-4e4d-b9a8-108527fc24d2\") " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.364767 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.364830 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-logs" (OuterVolumeSpecName: "logs") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.371308 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-scripts" (OuterVolumeSpecName: "scripts") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.371499 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07faeb84-d0de-4e4d-b9a8-108527fc24d2-kube-api-access-htpdv" (OuterVolumeSpecName: "kube-api-access-htpdv") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "kube-api-access-htpdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.380529 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.418465 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.431982 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-config-data" (OuterVolumeSpecName: "config-data") pod "07faeb84-d0de-4e4d-b9a8-108527fc24d2" (UID: "07faeb84-d0de-4e4d-b9a8-108527fc24d2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466011 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466040 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466049 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466058 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07faeb84-d0de-4e4d-b9a8-108527fc24d2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466066 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htpdv\" (UniqueName: \"kubernetes.io/projected/07faeb84-d0de-4e4d-b9a8-108527fc24d2-kube-api-access-htpdv\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466074 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.466081 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07faeb84-d0de-4e4d-b9a8-108527fc24d2-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.487298 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.568142 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.670313 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ca037db5-f99e-4aa3-9725-68ac7fce0bf8","Type":"ContainerStarted","Data":"c6609e5d726de654a9cb499621fa77544de875cce035a45226fea618a56c2edd"} Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.675890 4852 generic.go:334] "Generic (PLEG): container finished" podID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerID="a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104" exitCode=143 Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.675933 4852 generic.go:334] "Generic (PLEG): container finished" podID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerID="9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d" exitCode=143 Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.677500 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.678443 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07faeb84-d0de-4e4d-b9a8-108527fc24d2","Type":"ContainerDied","Data":"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104"} Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.678485 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07faeb84-d0de-4e4d-b9a8-108527fc24d2","Type":"ContainerDied","Data":"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d"} Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.678502 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07faeb84-d0de-4e4d-b9a8-108527fc24d2","Type":"ContainerDied","Data":"a2cbb091ffa7de73435d0adeeb67014bd3fe2ebfeeb3af76c13826cb09f549aa"} Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.678521 4852 scope.go:117] "RemoveContainer" containerID="a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.710690 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.728397 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.732809 4852 scope.go:117] "RemoveContainer" containerID="9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.740129 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:01:05 crc kubenswrapper[4852]: E0129 11:01:05.740741 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-log" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.740827 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-log" Jan 29 11:01:05 crc kubenswrapper[4852]: E0129 11:01:05.740852 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-httpd" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.740859 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-httpd" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.741061 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-httpd" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.741075 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" containerName="glance-log" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.753441 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.755793 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.760124 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.760196 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.835295 4852 scope.go:117] "RemoveContainer" containerID="a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104" Jan 29 11:01:05 crc kubenswrapper[4852]: E0129 11:01:05.844070 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104\": container with ID starting with a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104 not found: ID does not exist" containerID="a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.844114 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104"} err="failed to get container status \"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104\": rpc error: code = NotFound desc = could not find container \"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104\": container with ID starting with a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104 not found: ID does not exist" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.844142 4852 scope.go:117] "RemoveContainer" containerID="9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d" Jan 29 11:01:05 crc kubenswrapper[4852]: E0129 11:01:05.845485 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d\": container with ID starting with 9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d not found: ID does not exist" containerID="9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.845650 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d"} err="failed to get container status \"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d\": rpc error: code = NotFound desc = could not find container \"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d\": container with ID starting with 9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d not found: ID does not exist" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.845665 4852 scope.go:117] "RemoveContainer" containerID="a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.845876 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104"} err="failed to get container status \"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104\": rpc error: code = NotFound desc = could not find container \"a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104\": container 
with ID starting with a302ccb1b676f1035c386b334fcaabc24697397a346ed6c3ba01db9ca4985104 not found: ID does not exist" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.845890 4852 scope.go:117] "RemoveContainer" containerID="9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.846234 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d"} err="failed to get container status \"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d\": rpc error: code = NotFound desc = could not find container \"9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d\": container with ID starting with 9c6913d4ea688a37668185398c24361ed3bf8443fe2a8d85bc0448ae849ee39d not found: ID does not exist" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.872868 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.872926 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-logs\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.872948 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.872983 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.872999 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.873015 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb6wx\" (UniqueName: \"kubernetes.io/projected/73161d78-6281-43c9-a807-b6bc7c0dde4b-kube-api-access-nb6wx\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.873057 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-config-data\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.873103 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-scripts\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978096 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-scripts\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978349 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-logs\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978382 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978469 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978496 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978519 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb6wx\" (UniqueName: \"kubernetes.io/projected/73161d78-6281-43c9-a807-b6bc7c0dde4b-kube-api-access-nb6wx\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978626 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.978620 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.998204 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-logs\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.998378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.998683 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-scripts\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:05 crc kubenswrapper[4852]: I0129 11:01:05.998907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.000294 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb6wx\" (UniqueName: \"kubernetes.io/projected/73161d78-6281-43c9-a807-b6bc7c0dde4b-kube-api-access-nb6wx\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.002659 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-config-data\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.004182 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.290311 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " pod="openstack/glance-default-external-api-0" Jan 29 
11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.407023 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.690038 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerStarted","Data":"a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a"} Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.693222 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ca037db5-f99e-4aa3-9725-68ac7fce0bf8","Type":"ContainerStarted","Data":"074535fc1a848dd48010bf1cb1212242e2fdbd9fa2595875fd1eedda35702c32"} Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.724067 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=11.724049735 podStartE2EDuration="11.724049735s" podCreationTimestamp="2026-01-29 11:00:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:06.719072123 +0000 UTC m=+1163.936403267" watchObservedRunningTime="2026-01-29 11:01:06.724049735 +0000 UTC m=+1163.941380859" Jan 29 11:01:06 crc kubenswrapper[4852]: I0129 11:01:06.918464 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:01:06 crc kubenswrapper[4852]: W0129 11:01:06.924795 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73161d78_6281_43c9_a807_b6bc7c0dde4b.slice/crio-0d16749121d381bac9154c29947c4f92ada72907eb2790138f57ecde3561c442 WatchSource:0}: Error finding container 0d16749121d381bac9154c29947c4f92ada72907eb2790138f57ecde3561c442: Status 404 returned error can't find the container with id 0d16749121d381bac9154c29947c4f92ada72907eb2790138f57ecde3561c442 Jan 29 11:01:07 crc kubenswrapper[4852]: I0129 11:01:07.492749 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07faeb84-d0de-4e4d-b9a8-108527fc24d2" path="/var/lib/kubelet/pods/07faeb84-d0de-4e4d-b9a8-108527fc24d2/volumes" Jan 29 11:01:07 crc kubenswrapper[4852]: I0129 11:01:07.704230 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"73161d78-6281-43c9-a807-b6bc7c0dde4b","Type":"ContainerStarted","Data":"0d16749121d381bac9154c29947c4f92ada72907eb2790138f57ecde3561c442"} Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.715049 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"73161d78-6281-43c9-a807-b6bc7c0dde4b","Type":"ContainerStarted","Data":"fedc4b54914d87c6de3a8d3692807080d96fd9ab2c84838df3fc1677b015647e"} Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.715444 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"73161d78-6281-43c9-a807-b6bc7c0dde4b","Type":"ContainerStarted","Data":"816812f230b8d0a4042e6e45d4ac7ad6d929f47b4ccbc9ef05c1721695841714"} Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.718061 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vn9vq" 
event={"ID":"d5ef08b5-420f-46cd-bc10-f021836fd6ee","Type":"ContainerStarted","Data":"1f82e67cdf0a37bc15bd82c2d397a0472b885d845d15fea79033f1d869c2c63a"} Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.720193 4852 generic.go:334] "Generic (PLEG): container finished" podID="716ef279-3244-4dd1-8ae4-a0b17c4d119e" containerID="d105533ecc823dceee85f7dd1ee79ff7664e044b9219ccc2db75fb06d4b37611" exitCode=0 Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.720228 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jxll9" event={"ID":"716ef279-3244-4dd1-8ae4-a0b17c4d119e","Type":"ContainerDied","Data":"d105533ecc823dceee85f7dd1ee79ff7664e044b9219ccc2db75fb06d4b37611"} Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.736210 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.736193156 podStartE2EDuration="3.736193156s" podCreationTimestamp="2026-01-29 11:01:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:08.731857639 +0000 UTC m=+1165.949188793" watchObservedRunningTime="2026-01-29 11:01:08.736193156 +0000 UTC m=+1165.953524290" Jan 29 11:01:08 crc kubenswrapper[4852]: I0129 11:01:08.751090 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-vn9vq" podStartSLOduration=2.8181615349999998 podStartE2EDuration="29.75106827s" podCreationTimestamp="2026-01-29 11:00:39 +0000 UTC" firstStartedPulling="2026-01-29 11:00:41.005074411 +0000 UTC m=+1138.222405545" lastFinishedPulling="2026-01-29 11:01:07.937981146 +0000 UTC m=+1165.155312280" observedRunningTime="2026-01-29 11:01:08.747852472 +0000 UTC m=+1165.965183626" watchObservedRunningTime="2026-01-29 11:01:08.75106827 +0000 UTC m=+1165.968399404" Jan 29 11:01:09 crc kubenswrapper[4852]: I0129 11:01:09.732528 4852 generic.go:334] "Generic (PLEG): container finished" podID="08ba6d45-1a5b-4131-baae-76160239df48" containerID="1f00cf17358b6899eebd1eaba2bbf4f8b138d64c109c97c9a622904c07013291" exitCode=0 Jan 29 11:01:09 crc kubenswrapper[4852]: I0129 11:01:09.732636 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pxxmr" event={"ID":"08ba6d45-1a5b-4131-baae-76160239df48","Type":"ContainerDied","Data":"1f00cf17358b6899eebd1eaba2bbf4f8b138d64c109c97c9a622904c07013291"} Jan 29 11:01:09 crc kubenswrapper[4852]: I0129 11:01:09.735265 4852 generic.go:334] "Generic (PLEG): container finished" podID="ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" containerID="cfc48cd686d7714ca17560f6be7658f618fc2eb927f3f91dac0d4e3c37a9cb42" exitCode=0 Jan 29 11:01:09 crc kubenswrapper[4852]: I0129 11:01:09.735329 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n2g46" event={"ID":"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f","Type":"ContainerDied","Data":"cfc48cd686d7714ca17560f6be7658f618fc2eb927f3f91dac0d4e3c37a9cb42"} Jan 29 11:01:10 crc kubenswrapper[4852]: I0129 11:01:10.748384 4852 generic.go:334] "Generic (PLEG): container finished" podID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" containerID="1f82e67cdf0a37bc15bd82c2d397a0472b885d845d15fea79033f1d869c2c63a" exitCode=0 Jan 29 11:01:10 crc kubenswrapper[4852]: I0129 11:01:10.748442 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vn9vq" 
event={"ID":"d5ef08b5-420f-46cd-bc10-f021836fd6ee","Type":"ContainerDied","Data":"1f82e67cdf0a37bc15bd82c2d397a0472b885d845d15fea79033f1d869c2c63a"} Jan 29 11:01:11 crc kubenswrapper[4852]: I0129 11:01:11.051805 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:01:11 crc kubenswrapper[4852]: I0129 11:01:11.117300 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-wm7cb"] Jan 29 11:01:11 crc kubenswrapper[4852]: I0129 11:01:11.117572 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerName="dnsmasq-dns" containerID="cri-o://746f1dd9de205fcb69bb9ffeae3ec14fe781bcf2e7c81382b54f3b30ebbced42" gracePeriod=10 Jan 29 11:01:11 crc kubenswrapper[4852]: I0129 11:01:11.775331 4852 generic.go:334] "Generic (PLEG): container finished" podID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerID="746f1dd9de205fcb69bb9ffeae3ec14fe781bcf2e7c81382b54f3b30ebbced42" exitCode=0 Jan 29 11:01:11 crc kubenswrapper[4852]: I0129 11:01:11.775726 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" event={"ID":"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4","Type":"ContainerDied","Data":"746f1dd9de205fcb69bb9ffeae3ec14fe781bcf2e7c81382b54f3b30ebbced42"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.235974 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-n2g46" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.251449 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.274103 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.282270 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-vn9vq" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.304973 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-scripts\") pod \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305060 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-fernet-keys\") pod \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305079 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbbct\" (UniqueName: \"kubernetes.io/projected/716ef279-3244-4dd1-8ae4-a0b17c4d119e-kube-api-access-cbbct\") pod \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305103 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mv5h9\" (UniqueName: \"kubernetes.io/projected/08ba6d45-1a5b-4131-baae-76160239df48-kube-api-access-mv5h9\") pod \"08ba6d45-1a5b-4131-baae-76160239df48\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305128 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-db-sync-config-data\") pod \"08ba6d45-1a5b-4131-baae-76160239df48\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305214 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-combined-ca-bundle\") pod \"08ba6d45-1a5b-4131-baae-76160239df48\" (UID: \"08ba6d45-1a5b-4131-baae-76160239df48\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305242 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-combined-ca-bundle\") pod \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305269 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-combined-ca-bundle\") pod \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305294 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-config\") pod \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305308 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-config-data\") pod \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\" 
(UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305337 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpfsx\" (UniqueName: \"kubernetes.io/projected/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-kube-api-access-wpfsx\") pod \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\" (UID: \"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.305371 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-credential-keys\") pod \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\" (UID: \"716ef279-3244-4dd1-8ae4-a0b17c4d119e\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.311948 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "716ef279-3244-4dd1-8ae4-a0b17c4d119e" (UID: "716ef279-3244-4dd1-8ae4-a0b17c4d119e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.320302 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/716ef279-3244-4dd1-8ae4-a0b17c4d119e-kube-api-access-cbbct" (OuterVolumeSpecName: "kube-api-access-cbbct") pod "716ef279-3244-4dd1-8ae4-a0b17c4d119e" (UID: "716ef279-3244-4dd1-8ae4-a0b17c4d119e"). InnerVolumeSpecName "kube-api-access-cbbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.320763 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "08ba6d45-1a5b-4131-baae-76160239df48" (UID: "08ba6d45-1a5b-4131-baae-76160239df48"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.320856 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-scripts" (OuterVolumeSpecName: "scripts") pod "716ef279-3244-4dd1-8ae4-a0b17c4d119e" (UID: "716ef279-3244-4dd1-8ae4-a0b17c4d119e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.326953 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "716ef279-3244-4dd1-8ae4-a0b17c4d119e" (UID: "716ef279-3244-4dd1-8ae4-a0b17c4d119e"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.327816 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-kube-api-access-wpfsx" (OuterVolumeSpecName: "kube-api-access-wpfsx") pod "ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" (UID: "ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f"). InnerVolumeSpecName "kube-api-access-wpfsx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.332101 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08ba6d45-1a5b-4131-baae-76160239df48-kube-api-access-mv5h9" (OuterVolumeSpecName: "kube-api-access-mv5h9") pod "08ba6d45-1a5b-4131-baae-76160239df48" (UID: "08ba6d45-1a5b-4131-baae-76160239df48"). InnerVolumeSpecName "kube-api-access-mv5h9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.333803 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.366011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" (UID: "ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.369645 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08ba6d45-1a5b-4131-baae-76160239df48" (UID: "08ba6d45-1a5b-4131-baae-76160239df48"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.371802 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-config-data" (OuterVolumeSpecName: "config-data") pod "716ef279-3244-4dd1-8ae4-a0b17c4d119e" (UID: "716ef279-3244-4dd1-8ae4-a0b17c4d119e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.373457 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "716ef279-3244-4dd1-8ae4-a0b17c4d119e" (UID: "716ef279-3244-4dd1-8ae4-a0b17c4d119e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.381651 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-config" (OuterVolumeSpecName: "config") pod "ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" (UID: "ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406622 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7dg8\" (UniqueName: \"kubernetes.io/projected/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-kube-api-access-g7dg8\") pod \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406705 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-nb\") pod \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406734 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-combined-ca-bundle\") pod \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406755 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvrrg\" (UniqueName: \"kubernetes.io/projected/d5ef08b5-420f-46cd-bc10-f021836fd6ee-kube-api-access-wvrrg\") pod \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406802 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-svc\") pod \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-sb\") pod \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406951 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5ef08b5-420f-46cd-bc10-f021836fd6ee-logs\") pod \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.406972 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-config-data\") pod \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.407026 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-scripts\") pod \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\" (UID: \"d5ef08b5-420f-46cd-bc10-f021836fd6ee\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.407070 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-swift-storage-0\") pod \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\" (UID: 
\"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.407115 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-config\") pod \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\" (UID: \"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4\") " Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.409750 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5ef08b5-420f-46cd-bc10-f021836fd6ee-logs" (OuterVolumeSpecName: "logs") pod "d5ef08b5-420f-46cd-bc10-f021836fd6ee" (UID: "d5ef08b5-420f-46cd-bc10-f021836fd6ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410857 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410900 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbbct\" (UniqueName: \"kubernetes.io/projected/716ef279-3244-4dd1-8ae4-a0b17c4d119e-kube-api-access-cbbct\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410916 4852 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410930 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mv5h9\" (UniqueName: \"kubernetes.io/projected/08ba6d45-1a5b-4131-baae-76160239df48-kube-api-access-mv5h9\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410946 4852 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410960 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5ef08b5-420f-46cd-bc10-f021836fd6ee-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410971 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ba6d45-1a5b-4131-baae-76160239df48-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410982 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.410999 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.411010 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.411022 4852 reconciler_common.go:293] 
"Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.411033 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpfsx\" (UniqueName: \"kubernetes.io/projected/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f-kube-api-access-wpfsx\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.411050 4852 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/716ef279-3244-4dd1-8ae4-a0b17c4d119e-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.412794 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-kube-api-access-g7dg8" (OuterVolumeSpecName: "kube-api-access-g7dg8") pod "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" (UID: "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4"). InnerVolumeSpecName "kube-api-access-g7dg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.415453 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5ef08b5-420f-46cd-bc10-f021836fd6ee-kube-api-access-wvrrg" (OuterVolumeSpecName: "kube-api-access-wvrrg") pod "d5ef08b5-420f-46cd-bc10-f021836fd6ee" (UID: "d5ef08b5-420f-46cd-bc10-f021836fd6ee"). InnerVolumeSpecName "kube-api-access-wvrrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.416572 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-scripts" (OuterVolumeSpecName: "scripts") pod "d5ef08b5-420f-46cd-bc10-f021836fd6ee" (UID: "d5ef08b5-420f-46cd-bc10-f021836fd6ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.443236 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5ef08b5-420f-46cd-bc10-f021836fd6ee" (UID: "d5ef08b5-420f-46cd-bc10-f021836fd6ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.457522 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-config-data" (OuterVolumeSpecName: "config-data") pod "d5ef08b5-420f-46cd-bc10-f021836fd6ee" (UID: "d5ef08b5-420f-46cd-bc10-f021836fd6ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.461271 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" (UID: "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.461667 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-config" (OuterVolumeSpecName: "config") pod "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" (UID: "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.461888 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" (UID: "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.462913 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" (UID: "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.480762 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" (UID: "1e6eb1cd-5a53-43cd-93c2-0596540b6ec4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514664 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514718 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514733 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514745 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7dg8\" (UniqueName: \"kubernetes.io/projected/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-kube-api-access-g7dg8\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514756 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514769 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514782 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvrrg\" (UniqueName: 
\"kubernetes.io/projected/d5ef08b5-420f-46cd-bc10-f021836fd6ee-kube-api-access-wvrrg\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514831 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514877 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.514892 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5ef08b5-420f-46cd-bc10-f021836fd6ee-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.791105 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" event={"ID":"1e6eb1cd-5a53-43cd-93c2-0596540b6ec4","Type":"ContainerDied","Data":"7b8f5c2c9df49f93f9d138392afecd12635087155b03849b7b00ebe5a3b6760b"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.791174 4852 scope.go:117] "RemoveContainer" containerID="746f1dd9de205fcb69bb9ffeae3ec14fe781bcf2e7c81382b54f3b30ebbced42" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.791356 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-wm7cb" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.798974 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vn9vq" event={"ID":"d5ef08b5-420f-46cd-bc10-f021836fd6ee","Type":"ContainerDied","Data":"8dc1dd2d2ec78504ada1f78026222d3e808b40b2c2e1208b0666e7143e69eb80"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.799061 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8dc1dd2d2ec78504ada1f78026222d3e808b40b2c2e1208b0666e7143e69eb80" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.799070 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vn9vq" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.802795 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jxll9" event={"ID":"716ef279-3244-4dd1-8ae4-a0b17c4d119e","Type":"ContainerDied","Data":"c87559f455f9862855c32cfc0433f02e075e63c7630d199682544120f0299efc"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.802834 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c87559f455f9862855c32cfc0433f02e075e63c7630d199682544120f0299efc" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.802910 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jxll9" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.811251 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pxxmr" event={"ID":"08ba6d45-1a5b-4131-baae-76160239df48","Type":"ContainerDied","Data":"53b92605876f248bf9a727df66912072064dc9d107ecff1da958c85874137da6"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.811292 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53b92605876f248bf9a727df66912072064dc9d107ecff1da958c85874137da6" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.811345 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pxxmr" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.813370 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-n2g46" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.813405 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n2g46" event={"ID":"ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f","Type":"ContainerDied","Data":"b97de7f6ec320143eb91f474e51a7086436b2d7a3b7194ceae48dcd8cfbe2718"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.813445 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b97de7f6ec320143eb91f474e51a7086436b2d7a3b7194ceae48dcd8cfbe2718" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.815889 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerStarted","Data":"966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7"} Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.828188 4852 scope.go:117] "RemoveContainer" containerID="547db02870572018ce399db2a9a2c3e8285391a48dca05a9b1d89ef5d427f1e2" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.867312 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-wm7cb"] Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.877025 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-wm7cb"] Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.900866 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5b5f6975bd-d8nwg"] Jan 29 11:01:12 crc kubenswrapper[4852]: E0129 11:01:12.901378 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="716ef279-3244-4dd1-8ae4-a0b17c4d119e" containerName="keystone-bootstrap" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901410 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="716ef279-3244-4dd1-8ae4-a0b17c4d119e" containerName="keystone-bootstrap" Jan 29 11:01:12 crc kubenswrapper[4852]: E0129 11:01:12.901429 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerName="dnsmasq-dns" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901437 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerName="dnsmasq-dns" Jan 29 11:01:12 crc kubenswrapper[4852]: E0129 11:01:12.901450 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" containerName="placement-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901459 4852 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" containerName="placement-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: E0129 11:01:12.901482 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08ba6d45-1a5b-4131-baae-76160239df48" containerName="barbican-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901492 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="08ba6d45-1a5b-4131-baae-76160239df48" containerName="barbican-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: E0129 11:01:12.901504 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" containerName="neutron-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901511 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" containerName="neutron-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: E0129 11:01:12.901541 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerName="init" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901549 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerName="init" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901818 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" containerName="placement-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901837 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" containerName="dnsmasq-dns" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901856 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="08ba6d45-1a5b-4131-baae-76160239df48" containerName="barbican-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901868 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" containerName="neutron-db-sync" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.901890 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="716ef279-3244-4dd1-8ae4-a0b17c4d119e" containerName="keystone-bootstrap" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.903066 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.908116 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5b5f6975bd-d8nwg"] Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.909617 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.909845 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.909969 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.911083 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 11:01:12 crc kubenswrapper[4852]: I0129 11:01:12.911899 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mhnlh" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026093 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-scripts\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026452 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41d17b94-5c6f-4736-b3bc-0a953abfc223-logs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026480 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-combined-ca-bundle\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-internal-tls-certs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026534 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-config-data\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026558 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfsk7\" (UniqueName: \"kubernetes.io/projected/41d17b94-5c6f-4736-b3bc-0a953abfc223-kube-api-access-mfsk7\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.026673 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-public-tls-certs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128020 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-scripts\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128158 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41d17b94-5c6f-4736-b3bc-0a953abfc223-logs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128188 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-combined-ca-bundle\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128213 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-internal-tls-certs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128244 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-config-data\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128268 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfsk7\" (UniqueName: \"kubernetes.io/projected/41d17b94-5c6f-4736-b3bc-0a953abfc223-kube-api-access-mfsk7\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128288 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-public-tls-certs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.128656 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41d17b94-5c6f-4736-b3bc-0a953abfc223-logs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.135413 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-scripts\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.135935 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-public-tls-certs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.139900 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-combined-ca-bundle\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.142403 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-internal-tls-certs\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.143825 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-config-data\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.147282 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfsk7\" (UniqueName: \"kubernetes.io/projected/41d17b94-5c6f-4736-b3bc-0a953abfc223-kube-api-access-mfsk7\") pod \"placement-5b5f6975bd-d8nwg\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.228569 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.526530 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e6eb1cd-5a53-43cd-93c2-0596540b6ec4" path="/var/lib/kubelet/pods/1e6eb1cd-5a53-43cd-93c2-0596540b6ec4/volumes" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.532615 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-c4f8f88d-2whzw"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.545272 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.567335 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-99xbg" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.567512 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.567654 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.567672 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.567793 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.567955 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.569354 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c4f8f88d-2whzw"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.618002 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-ncwhs"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.619498 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.641722 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-scripts\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.641912 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-config-data\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.641990 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-internal-tls-certs\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.642021 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njxns\" (UniqueName: \"kubernetes.io/projected/5611a301-79d2-4082-beba-c95db2a2bcad-kube-api-access-njxns\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.642075 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-fernet-keys\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 
11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.642127 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-public-tls-certs\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.642145 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-credential-keys\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.642182 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-combined-ca-bundle\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.645252 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-ncwhs"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.697631 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-59b785bd69-l6jw6"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.698956 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.706332 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-ftd4b" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.706521 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.713136 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.728672 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-59b785bd69-l6jw6"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.743532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-internal-tls-certs\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.743600 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njxns\" (UniqueName: \"kubernetes.io/projected/5611a301-79d2-4082-beba-c95db2a2bcad-kube-api-access-njxns\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.743631 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " 
pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.743676 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data-custom\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.744856 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-fernet-keys\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.744935 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.744968 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzkd6\" (UniqueName: \"kubernetes.io/projected/917c44cc-c548-4f3a-a859-aadaa5c08ec7-kube-api-access-dzkd6\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.744991 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-credential-keys\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745013 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-public-tls-certs\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745037 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlds4\" (UniqueName: \"kubernetes.io/projected/0e78d444-dc43-4deb-9c46-a027c64b7ed5-kube-api-access-xlds4\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745075 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-combined-ca-bundle\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745112 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-scripts\") pod \"keystone-c4f8f88d-2whzw\" (UID: 
\"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745155 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745192 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e78d444-dc43-4deb-9c46-a027c64b7ed5-logs\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745216 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-combined-ca-bundle\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745255 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745281 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-config-data\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745317 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.745353 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-config\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.764330 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-credential-keys\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.766596 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6445cccd4b-jdf45"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.767969 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.772820 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-combined-ca-bundle\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.775270 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-scripts\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.776475 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.782423 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-public-tls-certs\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.783016 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-internal-tls-certs\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.783569 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-fernet-keys\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.800601 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6445cccd4b-jdf45"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.801637 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-config-data\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.828436 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njxns\" (UniqueName: \"kubernetes.io/projected/5611a301-79d2-4082-beba-c95db2a2bcad-kube-api-access-njxns\") pod \"keystone-c4f8f88d-2whzw\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.842867 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-ncwhs"] Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.863848 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-logs\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") 
" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.863919 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.863962 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data-custom\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864010 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864037 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzkd6\" (UniqueName: \"kubernetes.io/projected/917c44cc-c548-4f3a-a859-aadaa5c08ec7-kube-api-access-dzkd6\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864058 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlds4\" (UniqueName: \"kubernetes.io/projected/0e78d444-dc43-4deb-9c46-a027c64b7ed5-kube-api-access-xlds4\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864101 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864124 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864150 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e78d444-dc43-4deb-9c46-a027c64b7ed5-logs\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864173 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-combined-ca-bundle\") pod 
\"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864204 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864230 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data-custom\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864248 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drsjv\" (UniqueName: \"kubernetes.io/projected/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-kube-api-access-drsjv\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864271 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864293 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-config\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.864313 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-combined-ca-bundle\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.873470 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e78d444-dc43-4deb-9c46-a027c64b7ed5-logs\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.874007 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.882068 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.889663 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-combined-ca-bundle\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.890290 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.890847 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-config\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.892054 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.901357 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data-custom\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.904860 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.951465 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzkd6\" (UniqueName: \"kubernetes.io/projected/917c44cc-c548-4f3a-a859-aadaa5c08ec7-kube-api-access-dzkd6\") pod \"dnsmasq-dns-5ccc5c4795-ncwhs\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.956563 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:13 crc kubenswrapper[4852]: E0129 11:01:13.959338 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-dzkd6 ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" podUID="917c44cc-c548-4f3a-a859-aadaa5c08ec7" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.960235 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlds4\" (UniqueName: \"kubernetes.io/projected/0e78d444-dc43-4deb-9c46-a027c64b7ed5-kube-api-access-xlds4\") pod \"barbican-worker-59b785bd69-l6jw6\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.966061 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.966148 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data-custom\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.966179 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drsjv\" (UniqueName: \"kubernetes.io/projected/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-kube-api-access-drsjv\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.966238 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-combined-ca-bundle\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.966269 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-logs\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:13 crc kubenswrapper[4852]: I0129 11:01:13.968358 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-logs\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.024259 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drsjv\" (UniqueName: 
\"kubernetes.io/projected/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-kube-api-access-drsjv\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.032419 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data-custom\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.048379 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-combined-ca-bundle\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.057231 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data\") pod \"barbican-keystone-listener-6445cccd4b-jdf45\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.057930 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.101294 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.101683 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-vdgvd"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.103114 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.169680 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-vdgvd"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.179847 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5b5f6975bd-d8nwg"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.200552 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-svc\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.200637 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.200665 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.200714 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhv4g\" (UniqueName: \"kubernetes.io/projected/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-kube-api-access-xhv4g\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.200791 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-config\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.200813 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.215193 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6bccf86c54-qh8mf"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.216886 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.225062 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.244845 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6bccf86c54-qh8mf"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.267595 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-66d6b946b9-8qp8x"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.269161 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.279750 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6cc75d7564-wfkl2"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.284530 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.303694 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5d5c584546-w57m7"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.312636 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-66d6b946b9-8qp8x"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.312735 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.322737 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qtjdx" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.322929 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.323093 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.323237 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.325033 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6cc75d7564-wfkl2"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335621 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-combined-ca-bundle\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335728 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335755 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrkcf\" (UniqueName: 
\"kubernetes.io/projected/2e7fd5a0-1c61-420f-8da8-fc192c66730b-kube-api-access-nrkcf\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335783 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335802 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695c2ec0-f202-49bd-828d-e7aafce54ef4-logs\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335832 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-config\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335853 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7fd5a0-1c61-420f-8da8-fc192c66730b-logs\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335873 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.335940 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data-custom\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336065 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-svc\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-combined-ca-bundle\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336156 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k2kr\" (UniqueName: \"kubernetes.io/projected/695c2ec0-f202-49bd-828d-e7aafce54ef4-kube-api-access-2k2kr\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336187 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336220 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336259 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8bf5d63-5181-4546-b5c8-94aaac228b1c-logs\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336275 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data-custom\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336295 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data-custom\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336336 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336351 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-combined-ca-bundle\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336381 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhv4g\" (UniqueName: \"kubernetes.io/projected/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-kube-api-access-xhv4g\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: 
\"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.336447 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v587\" (UniqueName: \"kubernetes.io/projected/b8bf5d63-5181-4546-b5c8-94aaac228b1c-kube-api-access-2v587\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.337680 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-config\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.338557 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.339138 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.340364 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-svc\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.343951 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5d5c584546-w57m7"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.350828 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.370509 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhv4g\" (UniqueName: \"kubernetes.io/projected/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-kube-api-access-xhv4g\") pod \"dnsmasq-dns-688c87cc99-vdgvd\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.411801 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5b854ff6b8-lksr4"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.418298 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.429660 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5b854ff6b8-lksr4"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447060 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447101 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-combined-ca-bundle\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447164 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-config\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447185 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-httpd-config\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447233 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v587\" (UniqueName: \"kubernetes.io/projected/b8bf5d63-5181-4546-b5c8-94aaac228b1c-kube-api-access-2v587\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447266 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-combined-ca-bundle\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447309 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-combined-ca-bundle\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447326 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447342 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrkcf\" (UniqueName: \"kubernetes.io/projected/2e7fd5a0-1c61-420f-8da8-fc192c66730b-kube-api-access-nrkcf\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447376 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447392 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695c2ec0-f202-49bd-828d-e7aafce54ef4-logs\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447412 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7fd5a0-1c61-420f-8da8-fc192c66730b-logs\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447451 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nj26\" (UniqueName: \"kubernetes.io/projected/822e4bb3-691c-486f-a043-5403174ee25c-kube-api-access-5nj26\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447483 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data-custom\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447539 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-ovndb-tls-certs\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447564 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-combined-ca-bundle\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447627 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k2kr\" (UniqueName: \"kubernetes.io/projected/695c2ec0-f202-49bd-828d-e7aafce54ef4-kube-api-access-2k2kr\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: 
I0129 11:01:14.447692 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8bf5d63-5181-4546-b5c8-94aaac228b1c-logs\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447717 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data-custom\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.447735 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data-custom\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.452554 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695c2ec0-f202-49bd-828d-e7aafce54ef4-logs\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.452920 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7fd5a0-1c61-420f-8da8-fc192c66730b-logs\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.453876 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.456098 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8bf5d63-5181-4546-b5c8-94aaac228b1c-logs\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.465177 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.466202 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-combined-ca-bundle\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.466305 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-combined-ca-bundle\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.466402 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-combined-ca-bundle\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.467225 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data-custom\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.474795 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data-custom\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.478326 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data-custom\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.478613 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k2kr\" (UniqueName: \"kubernetes.io/projected/695c2ec0-f202-49bd-828d-e7aafce54ef4-kube-api-access-2k2kr\") pod \"barbican-api-6bccf86c54-qh8mf\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.479229 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrkcf\" (UniqueName: \"kubernetes.io/projected/2e7fd5a0-1c61-420f-8da8-fc192c66730b-kube-api-access-nrkcf\") pod \"barbican-worker-66d6b946b9-8qp8x\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.479245 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v587\" (UniqueName: \"kubernetes.io/projected/b8bf5d63-5181-4546-b5c8-94aaac228b1c-kube-api-access-2v587\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.495143 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data\") pod \"barbican-keystone-listener-6cc75d7564-wfkl2\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 
11:01:14.560662 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23ac92d2-cb89-4381-84cd-1f6757afc78b-logs\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.561130 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-config\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.561172 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-httpd-config\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.561260 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data-custom\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.561297 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.569812 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.569867 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-config\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.570319 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-combined-ca-bundle\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.570380 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nj26\" (UniqueName: \"kubernetes.io/projected/822e4bb3-691c-486f-a043-5403174ee25c-kube-api-access-5nj26\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.570451 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-ovndb-tls-certs\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.570497 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r4zd\" (UniqueName: \"kubernetes.io/projected/23ac92d2-cb89-4381-84cd-1f6757afc78b-kube-api-access-5r4zd\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.570547 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-combined-ca-bundle\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.572731 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-httpd-config\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.575798 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-ovndb-tls-certs\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.576296 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-combined-ca-bundle\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 
11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.599192 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nj26\" (UniqueName: \"kubernetes.io/projected/822e4bb3-691c-486f-a043-5403174ee25c-kube-api-access-5nj26\") pod \"neutron-5d5c584546-w57m7\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.602828 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.626535 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.673418 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.674857 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data-custom\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.674889 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.674991 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r4zd\" (UniqueName: \"kubernetes.io/projected/23ac92d2-cb89-4381-84cd-1f6757afc78b-kube-api-access-5r4zd\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.675038 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-combined-ca-bundle\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.675092 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23ac92d2-cb89-4381-84cd-1f6757afc78b-logs\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.675498 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23ac92d2-cb89-4381-84cd-1f6757afc78b-logs\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.683394 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data-custom\") pod 
\"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.684261 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-combined-ca-bundle\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.689043 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.702503 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.703609 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r4zd\" (UniqueName: \"kubernetes.io/projected/23ac92d2-cb89-4381-84cd-1f6757afc78b-kube-api-access-5r4zd\") pod \"barbican-api-5b854ff6b8-lksr4\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.727237 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6445cccd4b-jdf45"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.806301 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.872267 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-59b785bd69-l6jw6"] Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.980788 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b5f6975bd-d8nwg" event={"ID":"41d17b94-5c6f-4736-b3bc-0a953abfc223","Type":"ContainerStarted","Data":"3949177a1a88dba32b3c67ee67ae43de9ff434d49383b5d5cc8f3b4d955a2354"} Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.980855 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b5f6975bd-d8nwg" event={"ID":"41d17b94-5c6f-4736-b3bc-0a953abfc223","Type":"ContainerStarted","Data":"e4f94c23b1be17e7f0be8e2fa9ddd10dc135dc085590b6bf97c60da54aeeada5"} Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.982643 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:14 crc kubenswrapper[4852]: I0129 11:01:14.983666 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" event={"ID":"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191","Type":"ContainerStarted","Data":"39b5a13133b5b116f18c46ae0f38098ea35c205ba5255b2c3b88fea53bd51081"} Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.001453 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c4f8f88d-2whzw"] Jan 29 11:01:15 crc kubenswrapper[4852]: W0129 11:01:15.014174 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5611a301_79d2_4082_beba_c95db2a2bcad.slice/crio-496da8b54a5a1c14319b94d4309a09b3a3574c099b65fb811e21ce9ebf17b180 WatchSource:0}: Error finding container 496da8b54a5a1c14319b94d4309a09b3a3574c099b65fb811e21ce9ebf17b180: Status 404 returned error can't find the container with id 496da8b54a5a1c14319b94d4309a09b3a3574c099b65fb811e21ce9ebf17b180 Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.286052 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6bccf86c54-qh8mf"] Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.290618 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:15 crc kubenswrapper[4852]: W0129 11:01:15.299603 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6c46b43_1c5f_4ed7_a286_757b9a5e73e8.slice/crio-3d8e5a691339e6915a41eb92500d67bee415764f21a1b5761fa625bb840471bb WatchSource:0}: Error finding container 3d8e5a691339e6915a41eb92500d67bee415764f21a1b5761fa625bb840471bb: Status 404 returned error can't find the container with id 3d8e5a691339e6915a41eb92500d67bee415764f21a1b5761fa625bb840471bb Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.300184 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-vdgvd"] Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.396353 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-svc\") pod \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.396957 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-swift-storage-0\") pod \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397023 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "917c44cc-c548-4f3a-a859-aadaa5c08ec7" (UID: "917c44cc-c548-4f3a-a859-aadaa5c08ec7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397138 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-config\") pod \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397168 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-sb\") pod \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397207 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzkd6\" (UniqueName: \"kubernetes.io/projected/917c44cc-c548-4f3a-a859-aadaa5c08ec7-kube-api-access-dzkd6\") pod \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397293 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-nb\") pod \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\" (UID: \"917c44cc-c548-4f3a-a859-aadaa5c08ec7\") " Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397555 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "917c44cc-c548-4f3a-a859-aadaa5c08ec7" (UID: "917c44cc-c548-4f3a-a859-aadaa5c08ec7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397741 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "917c44cc-c548-4f3a-a859-aadaa5c08ec7" (UID: "917c44cc-c548-4f3a-a859-aadaa5c08ec7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.397832 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-config" (OuterVolumeSpecName: "config") pod "917c44cc-c548-4f3a-a859-aadaa5c08ec7" (UID: "917c44cc-c548-4f3a-a859-aadaa5c08ec7"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.398090 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.398120 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.398132 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.398145 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.398128 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "917c44cc-c548-4f3a-a859-aadaa5c08ec7" (UID: "917c44cc-c548-4f3a-a859-aadaa5c08ec7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.403461 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917c44cc-c548-4f3a-a859-aadaa5c08ec7-kube-api-access-dzkd6" (OuterVolumeSpecName: "kube-api-access-dzkd6") pod "917c44cc-c548-4f3a-a859-aadaa5c08ec7" (UID: "917c44cc-c548-4f3a-a859-aadaa5c08ec7"). InnerVolumeSpecName "kube-api-access-dzkd6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.500484 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzkd6\" (UniqueName: \"kubernetes.io/projected/917c44cc-c548-4f3a-a859-aadaa5c08ec7-kube-api-access-dzkd6\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.500522 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/917c44cc-c548-4f3a-a859-aadaa5c08ec7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:15 crc kubenswrapper[4852]: W0129 11:01:15.573895 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23ac92d2_cb89_4381_84cd_1f6757afc78b.slice/crio-640439362f7959e63be798f2dcb25045fa0da8834d03674f2b2f137fc46e10ba WatchSource:0}: Error finding container 640439362f7959e63be798f2dcb25045fa0da8834d03674f2b2f137fc46e10ba: Status 404 returned error can't find the container with id 640439362f7959e63be798f2dcb25045fa0da8834d03674f2b2f137fc46e10ba Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.576823 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6cc75d7564-wfkl2"] Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.602081 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5b854ff6b8-lksr4"] Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.624218 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-66d6b946b9-8qp8x"] Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.680899 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5d5c584546-w57m7"] Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.992747 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c4f8f88d-2whzw" event={"ID":"5611a301-79d2-4082-beba-c95db2a2bcad","Type":"ContainerStarted","Data":"496da8b54a5a1c14319b94d4309a09b3a3574c099b65fb811e21ce9ebf17b180"} Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.994574 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59b785bd69-l6jw6" event={"ID":"0e78d444-dc43-4deb-9c46-a027c64b7ed5","Type":"ContainerStarted","Data":"92f0d700522fa0f0d50e01f618b4306f640669a29fe1266e7297d9c5f90faaf8"} Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.995465 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" event={"ID":"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8","Type":"ContainerStarted","Data":"3d8e5a691339e6915a41eb92500d67bee415764f21a1b5761fa625bb840471bb"} Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.996669 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b854ff6b8-lksr4" event={"ID":"23ac92d2-cb89-4381-84cd-1f6757afc78b","Type":"ContainerStarted","Data":"640439362f7959e63be798f2dcb25045fa0da8834d03674f2b2f137fc46e10ba"} Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.997733 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" event={"ID":"b8bf5d63-5181-4546-b5c8-94aaac228b1c","Type":"ContainerStarted","Data":"deb86643e87f681fea98cc6119ea17c4a82d37ebfee1f219c03c61b0f44de3fb"} Jan 29 11:01:15 crc kubenswrapper[4852]: I0129 11:01:15.999100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-5d5c584546-w57m7" event={"ID":"822e4bb3-691c-486f-a043-5403174ee25c","Type":"ContainerStarted","Data":"ef1265cf13d3e1a654b01a6832f881ee18608b3a2c06d460c335afd5e2aa3dc6"} Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.000374 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bccf86c54-qh8mf" event={"ID":"695c2ec0-f202-49bd-828d-e7aafce54ef4","Type":"ContainerStarted","Data":"59d85378e7b03c7a3e129a316a32d09389f3ff4be43f74e43422007594b627ec"} Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.001711 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66d6b946b9-8qp8x" event={"ID":"2e7fd5a0-1c61-420f-8da8-fc192c66730b","Type":"ContainerStarted","Data":"80a289af8fa836f56def5b749b72ca50195b93576e353b63c8abb46ac6157168"} Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.001745 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-ncwhs" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.220739 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.220824 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.270041 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.275919 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.330188 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-ncwhs"] Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.356233 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-ncwhs"] Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.410967 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.411022 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.475625 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 11:01:16 crc kubenswrapper[4852]: I0129 11:01:16.492803 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.021125 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b5f6975bd-d8nwg" event={"ID":"41d17b94-5c6f-4736-b3bc-0a953abfc223","Type":"ContainerStarted","Data":"d2ebe4dcbc8ae0bc7c37061797a6a738c81fd818fae0c31cb5e242d628b9995e"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.021394 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.021618 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.047533 4852 
generic.go:334] "Generic (PLEG): container finished" podID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerID="ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0" exitCode=0 Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.047836 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" event={"ID":"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8","Type":"ContainerDied","Data":"ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.092923 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b854ff6b8-lksr4" event={"ID":"23ac92d2-cb89-4381-84cd-1f6757afc78b","Type":"ContainerStarted","Data":"a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.093018 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b854ff6b8-lksr4" event={"ID":"23ac92d2-cb89-4381-84cd-1f6757afc78b","Type":"ContainerStarted","Data":"b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.093425 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.093869 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.103871 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5b5f6975bd-d8nwg" podStartSLOduration=5.103837847 podStartE2EDuration="5.103837847s" podCreationTimestamp="2026-01-29 11:01:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:17.053697527 +0000 UTC m=+1174.271028661" watchObservedRunningTime="2026-01-29 11:01:17.103837847 +0000 UTC m=+1174.321168981" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.120147 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.120413 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d5c584546-w57m7" event={"ID":"822e4bb3-691c-486f-a043-5403174ee25c","Type":"ContainerStarted","Data":"1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.120510 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d5c584546-w57m7" event={"ID":"822e4bb3-691c-486f-a043-5403174ee25c","Type":"ContainerStarted","Data":"6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.168912 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bccf86c54-qh8mf" event={"ID":"695c2ec0-f202-49bd-828d-e7aafce54ef4","Type":"ContainerStarted","Data":"81b125c112e526a1fa48a67da804188670ce01adf6a77c11f2fa98a27658fd46"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.168999 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bccf86c54-qh8mf" event={"ID":"695c2ec0-f202-49bd-828d-e7aafce54ef4","Type":"ContainerStarted","Data":"03d384e3798d4819d64bab45a2551aab99719b6bca97232f1415162c9ff35c04"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.169070 4852 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.169104 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.185563 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c4f8f88d-2whzw" event={"ID":"5611a301-79d2-4082-beba-c95db2a2bcad","Type":"ContainerStarted","Data":"6309e1fbaf859c6c9e8f0f198002bde742b73cff0fec560c9a826574d4ae297a"} Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.185911 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.186075 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.186194 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.186328 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.186452 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.303252 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5b854ff6b8-lksr4" podStartSLOduration=3.3032327 podStartE2EDuration="3.3032327s" podCreationTimestamp="2026-01-29 11:01:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:17.191891639 +0000 UTC m=+1174.409222783" watchObservedRunningTime="2026-01-29 11:01:17.3032327 +0000 UTC m=+1174.520563834" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.306210 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5d5c584546-w57m7" podStartSLOduration=3.306198273 podStartE2EDuration="3.306198273s" podCreationTimestamp="2026-01-29 11:01:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:17.228827825 +0000 UTC m=+1174.446158959" watchObservedRunningTime="2026-01-29 11:01:17.306198273 +0000 UTC m=+1174.523529407" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.322007 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-c4f8f88d-2whzw" podStartSLOduration=4.321989001 podStartE2EDuration="4.321989001s" podCreationTimestamp="2026-01-29 11:01:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:17.259871526 +0000 UTC m=+1174.477202670" watchObservedRunningTime="2026-01-29 11:01:17.321989001 +0000 UTC m=+1174.539320135" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.353995 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6bccf86c54-qh8mf" podStartSLOduration=4.353926784 podStartE2EDuration="4.353926784s" podCreationTimestamp="2026-01-29 11:01:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:17.29953411 +0000 UTC m=+1174.516865244" watchObservedRunningTime="2026-01-29 11:01:17.353926784 +0000 UTC m=+1174.571257918" Jan 29 11:01:17 crc kubenswrapper[4852]: I0129 11:01:17.487339 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="917c44cc-c548-4f3a-a859-aadaa5c08ec7" path="/var/lib/kubelet/pods/917c44cc-c548-4f3a-a859-aadaa5c08ec7/volumes" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.104085 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f4fbff985-ww2n4"] Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.106405 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.108545 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.108723 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.124840 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f4fbff985-ww2n4"] Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.202624 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" event={"ID":"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8","Type":"ContainerStarted","Data":"77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b"} Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.202977 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.210009 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9kkxs" event={"ID":"88726a03-9dc5-49b5-b4cc-60b521b51d61","Type":"ContainerStarted","Data":"b8bd41006cef6fa1ebbdb27fa01c94f2f989cd072c9a79042467310c603afff2"} Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.233284 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" podStartSLOduration=5.233256084 podStartE2EDuration="5.233256084s" podCreationTimestamp="2026-01-29 11:01:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:18.226099539 +0000 UTC m=+1175.443430683" watchObservedRunningTime="2026-01-29 11:01:18.233256084 +0000 UTC m=+1175.450587218" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.258299 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-9kkxs" podStartSLOduration=3.549988714 podStartE2EDuration="39.258278658s" podCreationTimestamp="2026-01-29 11:00:39 +0000 UTC" firstStartedPulling="2026-01-29 11:00:40.629612296 +0000 UTC m=+1137.846943430" lastFinishedPulling="2026-01-29 11:01:16.33790224 +0000 UTC m=+1173.555233374" observedRunningTime="2026-01-29 11:01:18.251921863 +0000 UTC m=+1175.469252997" watchObservedRunningTime="2026-01-29 11:01:18.258278658 +0000 UTC m=+1175.475609792" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.282722 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pln8b\" (UniqueName: 
\"kubernetes.io/projected/2683963a-32cd-488b-84f8-9222fc66a2b2-kube-api-access-pln8b\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.282997 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-httpd-config\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.283127 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-ovndb-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.283277 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-public-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.283392 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-internal-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.283570 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-config\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.283706 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-combined-ca-bundle\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.386532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pln8b\" (UniqueName: \"kubernetes.io/projected/2683963a-32cd-488b-84f8-9222fc66a2b2-kube-api-access-pln8b\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.386691 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-httpd-config\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.386755 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-ovndb-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.386884 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-public-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.390695 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-internal-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.391640 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-config\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.393410 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-combined-ca-bundle\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.398753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-httpd-config\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.398752 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-ovndb-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.401113 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-combined-ca-bundle\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.401701 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-internal-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.401941 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-config\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " 
pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.404259 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-public-tls-certs\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.409391 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pln8b\" (UniqueName: \"kubernetes.io/projected/2683963a-32cd-488b-84f8-9222fc66a2b2-kube-api-access-pln8b\") pod \"neutron-7f4fbff985-ww2n4\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:18 crc kubenswrapper[4852]: I0129 11:01:18.425254 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.229389 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.229728 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.453214 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.453483 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.510640 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.690200 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:19 crc kubenswrapper[4852]: I0129 11:01:19.690269 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.482973 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6bccf86c54-qh8mf"] Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.484675 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6bccf86c54-qh8mf" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api-log" containerID="cri-o://03d384e3798d4819d64bab45a2551aab99719b6bca97232f1415162c9ff35c04" gracePeriod=30 Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.484734 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6bccf86c54-qh8mf" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api" containerID="cri-o://81b125c112e526a1fa48a67da804188670ce01adf6a77c11f2fa98a27658fd46" gracePeriod=30 Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.536223 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-588c766876-422z6"] Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.543675 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.547979 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.548204 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.552263 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-588c766876-422z6"] Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635403 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjwfs\" (UniqueName: \"kubernetes.io/projected/0bd5133c-258f-4725-9da4-17941a408af8-kube-api-access-zjwfs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635519 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-internal-tls-certs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635572 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-public-tls-certs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635608 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data-custom\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635633 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd5133c-258f-4725-9da4-17941a408af8-logs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635656 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.635677 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-combined-ca-bundle\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737285 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-public-tls-certs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737334 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data-custom\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737351 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd5133c-258f-4725-9da4-17941a408af8-logs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737378 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737406 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-combined-ca-bundle\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737447 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjwfs\" (UniqueName: \"kubernetes.io/projected/0bd5133c-258f-4725-9da4-17941a408af8-kube-api-access-zjwfs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.737548 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-internal-tls-certs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.740069 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd5133c-258f-4725-9da4-17941a408af8-logs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.743287 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-internal-tls-certs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.743369 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data-custom\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.744213 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.744303 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-public-tls-certs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.744920 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-combined-ca-bundle\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.755477 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjwfs\" (UniqueName: \"kubernetes.io/projected/0bd5133c-258f-4725-9da4-17941a408af8-kube-api-access-zjwfs\") pod \"barbican-api-588c766876-422z6\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:20 crc kubenswrapper[4852]: I0129 11:01:20.891493 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:21 crc kubenswrapper[4852]: I0129 11:01:21.273705 4852 generic.go:334] "Generic (PLEG): container finished" podID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerID="81b125c112e526a1fa48a67da804188670ce01adf6a77c11f2fa98a27658fd46" exitCode=0 Jan 29 11:01:21 crc kubenswrapper[4852]: I0129 11:01:21.274020 4852 generic.go:334] "Generic (PLEG): container finished" podID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerID="03d384e3798d4819d64bab45a2551aab99719b6bca97232f1415162c9ff35c04" exitCode=143 Jan 29 11:01:21 crc kubenswrapper[4852]: I0129 11:01:21.273777 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bccf86c54-qh8mf" event={"ID":"695c2ec0-f202-49bd-828d-e7aafce54ef4","Type":"ContainerDied","Data":"81b125c112e526a1fa48a67da804188670ce01adf6a77c11f2fa98a27658fd46"} Jan 29 11:01:21 crc kubenswrapper[4852]: I0129 11:01:21.274061 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bccf86c54-qh8mf" event={"ID":"695c2ec0-f202-49bd-828d-e7aafce54ef4","Type":"ContainerDied","Data":"03d384e3798d4819d64bab45a2551aab99719b6bca97232f1415162c9ff35c04"} Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.401688 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f4fbff985-ww2n4"] Jan 29 11:01:23 crc kubenswrapper[4852]: W0129 11:01:23.598921 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2683963a_32cd_488b_84f8_9222fc66a2b2.slice/crio-3b524eed80f85c37c54ee5c4f63066909b50a5307e2c22131f8067b6b9e09b98 WatchSource:0}: Error finding container 3b524eed80f85c37c54ee5c4f63066909b50a5307e2c22131f8067b6b9e09b98: Status 404 returned error can't find the container with id 3b524eed80f85c37c54ee5c4f63066909b50a5307e2c22131f8067b6b9e09b98 Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.682086 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.690402 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k2kr\" (UniqueName: \"kubernetes.io/projected/695c2ec0-f202-49bd-828d-e7aafce54ef4-kube-api-access-2k2kr\") pod \"695c2ec0-f202-49bd-828d-e7aafce54ef4\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.690523 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-combined-ca-bundle\") pod \"695c2ec0-f202-49bd-828d-e7aafce54ef4\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.690943 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data\") pod \"695c2ec0-f202-49bd-828d-e7aafce54ef4\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.691080 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data-custom\") pod \"695c2ec0-f202-49bd-828d-e7aafce54ef4\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.691169 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695c2ec0-f202-49bd-828d-e7aafce54ef4-logs\") pod \"695c2ec0-f202-49bd-828d-e7aafce54ef4\" (UID: \"695c2ec0-f202-49bd-828d-e7aafce54ef4\") " Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.692948 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/695c2ec0-f202-49bd-828d-e7aafce54ef4-logs" (OuterVolumeSpecName: "logs") pod "695c2ec0-f202-49bd-828d-e7aafce54ef4" (UID: "695c2ec0-f202-49bd-828d-e7aafce54ef4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.697792 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/695c2ec0-f202-49bd-828d-e7aafce54ef4-kube-api-access-2k2kr" (OuterVolumeSpecName: "kube-api-access-2k2kr") pod "695c2ec0-f202-49bd-828d-e7aafce54ef4" (UID: "695c2ec0-f202-49bd-828d-e7aafce54ef4"). InnerVolumeSpecName "kube-api-access-2k2kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.701698 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "695c2ec0-f202-49bd-828d-e7aafce54ef4" (UID: "695c2ec0-f202-49bd-828d-e7aafce54ef4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.750778 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data" (OuterVolumeSpecName: "config-data") pod "695c2ec0-f202-49bd-828d-e7aafce54ef4" (UID: "695c2ec0-f202-49bd-828d-e7aafce54ef4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.754910 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "695c2ec0-f202-49bd-828d-e7aafce54ef4" (UID: "695c2ec0-f202-49bd-828d-e7aafce54ef4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.793403 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.793435 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695c2ec0-f202-49bd-828d-e7aafce54ef4-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.793446 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k2kr\" (UniqueName: \"kubernetes.io/projected/695c2ec0-f202-49bd-828d-e7aafce54ef4-kube-api-access-2k2kr\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.793455 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:23 crc kubenswrapper[4852]: I0129 11:01:23.793463 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695c2ec0-f202-49bd-828d-e7aafce54ef4-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.305923 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bccf86c54-qh8mf" event={"ID":"695c2ec0-f202-49bd-828d-e7aafce54ef4","Type":"ContainerDied","Data":"59d85378e7b03c7a3e129a316a32d09389f3ff4be43f74e43422007594b627ec"} Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.306046 4852 scope.go:117] "RemoveContainer" containerID="81b125c112e526a1fa48a67da804188670ce01adf6a77c11f2fa98a27658fd46" Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.306131 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6bccf86c54-qh8mf" Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.309511 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f4fbff985-ww2n4" event={"ID":"2683963a-32cd-488b-84f8-9222fc66a2b2","Type":"ContainerStarted","Data":"3b524eed80f85c37c54ee5c4f63066909b50a5307e2c22131f8067b6b9e09b98"} Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.343316 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6bccf86c54-qh8mf"] Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.350109 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6bccf86c54-qh8mf"] Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.424263 4852 scope.go:117] "RemoveContainer" containerID="03d384e3798d4819d64bab45a2551aab99719b6bca97232f1415162c9ff35c04" Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.573830 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.657122 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-t9jrt"] Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.662283 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerName="dnsmasq-dns" containerID="cri-o://7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25" gracePeriod=10 Jan 29 11:01:24 crc kubenswrapper[4852]: I0129 11:01:24.915244 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-588c766876-422z6"] Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.236247 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.354884 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerStarted","Data":"078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93"} Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.355044 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-central-agent" containerID="cri-o://b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf" gracePeriod=30 Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.355116 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.355426 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="proxy-httpd" containerID="cri-o://078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93" gracePeriod=30 Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.355468 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="sg-core" containerID="cri-o://966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7" gracePeriod=30 Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.355502 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-notification-agent" containerID="cri-o://a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a" gracePeriod=30 Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.368887 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f4fbff985-ww2n4" event={"ID":"2683963a-32cd-488b-84f8-9222fc66a2b2","Type":"ContainerStarted","Data":"d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173"} Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.382356 4852 generic.go:334] "Generic (PLEG): container finished" podID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerID="7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25" exitCode=0 Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.382690 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" event={"ID":"33e33319-f0fa-4c80-af5f-47a6fc4e7e23","Type":"ContainerDied","Data":"7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25"} Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.383028 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" event={"ID":"33e33319-f0fa-4c80-af5f-47a6fc4e7e23","Type":"ContainerDied","Data":"807f30cc49ba546b7def8ca35bfcc32f22f0aa8bb86221cb243042013937c9e2"} Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.383061 4852 scope.go:117] "RemoveContainer" containerID="7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.383860 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-t9jrt" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.390875 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.159978084 podStartE2EDuration="46.390855221s" podCreationTimestamp="2026-01-29 11:00:39 +0000 UTC" firstStartedPulling="2026-01-29 11:00:41.331556223 +0000 UTC m=+1138.548887357" lastFinishedPulling="2026-01-29 11:01:24.56243335 +0000 UTC m=+1181.779764494" observedRunningTime="2026-01-29 11:01:25.377602425 +0000 UTC m=+1182.594933579" watchObservedRunningTime="2026-01-29 11:01:25.390855221 +0000 UTC m=+1182.608186345" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.397805 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-588c766876-422z6" event={"ID":"0bd5133c-258f-4725-9da4-17941a408af8","Type":"ContainerStarted","Data":"b8a7ceeceb73045f64c69bea74d5dea7884e3a3e03aa3a441daa916c3097cd3d"} Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.437018 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-sb\") pod \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.437203 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mndxh\" (UniqueName: \"kubernetes.io/projected/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-kube-api-access-mndxh\") pod \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.437249 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-swift-storage-0\") pod \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.437283 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-nb\") pod \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.437369 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-svc\") pod \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.437445 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-config\") pod \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\" (UID: \"33e33319-f0fa-4c80-af5f-47a6fc4e7e23\") " Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.452771 4852 scope.go:117] "RemoveContainer" containerID="4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.453505 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-kube-api-access-mndxh" (OuterVolumeSpecName: 
"kube-api-access-mndxh") pod "33e33319-f0fa-4c80-af5f-47a6fc4e7e23" (UID: "33e33319-f0fa-4c80-af5f-47a6fc4e7e23"). InnerVolumeSpecName "kube-api-access-mndxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.492569 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" path="/var/lib/kubelet/pods/695c2ec0-f202-49bd-828d-e7aafce54ef4/volumes" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.513363 4852 scope.go:117] "RemoveContainer" containerID="7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25" Jan 29 11:01:25 crc kubenswrapper[4852]: E0129 11:01:25.514870 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25\": container with ID starting with 7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25 not found: ID does not exist" containerID="7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.514929 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25"} err="failed to get container status \"7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25\": rpc error: code = NotFound desc = could not find container \"7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25\": container with ID starting with 7b84febf48f771f13c0ff12b02c8ecd313a86428b2816f95247a9c7c8800fc25 not found: ID does not exist" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.514966 4852 scope.go:117] "RemoveContainer" containerID="4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227" Jan 29 11:01:25 crc kubenswrapper[4852]: E0129 11:01:25.515246 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227\": container with ID starting with 4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227 not found: ID does not exist" containerID="4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.515274 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227"} err="failed to get container status \"4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227\": rpc error: code = NotFound desc = could not find container \"4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227\": container with ID starting with 4ae1653cf801a9d980220ba95d4caa56b50a2af1b6dd1d6723206a6712835227 not found: ID does not exist" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.539394 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mndxh\" (UniqueName: \"kubernetes.io/projected/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-kube-api-access-mndxh\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.621503 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "33e33319-f0fa-4c80-af5f-47a6fc4e7e23" (UID: 
"33e33319-f0fa-4c80-af5f-47a6fc4e7e23"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.625042 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "33e33319-f0fa-4c80-af5f-47a6fc4e7e23" (UID: "33e33319-f0fa-4c80-af5f-47a6fc4e7e23"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.641994 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.642022 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.708264 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "33e33319-f0fa-4c80-af5f-47a6fc4e7e23" (UID: "33e33319-f0fa-4c80-af5f-47a6fc4e7e23"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.708933 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-config" (OuterVolumeSpecName: "config") pod "33e33319-f0fa-4c80-af5f-47a6fc4e7e23" (UID: "33e33319-f0fa-4c80-af5f-47a6fc4e7e23"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.723347 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "33e33319-f0fa-4c80-af5f-47a6fc4e7e23" (UID: "33e33319-f0fa-4c80-af5f-47a6fc4e7e23"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.743540 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.743600 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:25 crc kubenswrapper[4852]: I0129 11:01:25.743615 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33e33319-f0fa-4c80-af5f-47a6fc4e7e23-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.016152 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-t9jrt"] Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.028114 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-t9jrt"] Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.407045 4852 generic.go:334] "Generic (PLEG): container finished" podID="88726a03-9dc5-49b5-b4cc-60b521b51d61" containerID="b8bd41006cef6fa1ebbdb27fa01c94f2f989cd072c9a79042467310c603afff2" exitCode=0 Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.407097 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9kkxs" event={"ID":"88726a03-9dc5-49b5-b4cc-60b521b51d61","Type":"ContainerDied","Data":"b8bd41006cef6fa1ebbdb27fa01c94f2f989cd072c9a79042467310c603afff2"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.408928 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-588c766876-422z6" event={"ID":"0bd5133c-258f-4725-9da4-17941a408af8","Type":"ContainerStarted","Data":"02a21826da03d176e3af7a859639e921f42ebe93b8e3d176115585c9b0fb9752"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.408955 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-588c766876-422z6" event={"ID":"0bd5133c-258f-4725-9da4-17941a408af8","Type":"ContainerStarted","Data":"32ee64127b13aab5bf090c833967ae28b8675a965dc8eb24e8e5d01c8cd166f4"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.409051 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.410589 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" event={"ID":"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191","Type":"ContainerStarted","Data":"863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.410612 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" event={"ID":"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191","Type":"ContainerStarted","Data":"ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.412808 4852 generic.go:334] "Generic (PLEG): container finished" podID="f988dca9-b8dd-406e-b316-d27052f43c80" containerID="966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7" exitCode=2 Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.412838 4852 generic.go:334] 
"Generic (PLEG): container finished" podID="f988dca9-b8dd-406e-b316-d27052f43c80" containerID="b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf" exitCode=0 Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.412886 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerDied","Data":"966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.412923 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerDied","Data":"b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.414479 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f4fbff985-ww2n4" event={"ID":"2683963a-32cd-488b-84f8-9222fc66a2b2","Type":"ContainerStarted","Data":"82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.414619 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.415944 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66d6b946b9-8qp8x" event={"ID":"2e7fd5a0-1c61-420f-8da8-fc192c66730b","Type":"ContainerStarted","Data":"8366b4f53588b202defa86ab08279ed8a31501c51a1b057517aab806551203c0"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.415966 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66d6b946b9-8qp8x" event={"ID":"2e7fd5a0-1c61-420f-8da8-fc192c66730b","Type":"ContainerStarted","Data":"a1df71499e1fd5786e8f8ce1f972f8c5cdb23d2e76c158581681aaf76b9972b9"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.417417 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59b785bd69-l6jw6" event={"ID":"0e78d444-dc43-4deb-9c46-a027c64b7ed5","Type":"ContainerStarted","Data":"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.417440 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59b785bd69-l6jw6" event={"ID":"0e78d444-dc43-4deb-9c46-a027c64b7ed5","Type":"ContainerStarted","Data":"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.418790 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" event={"ID":"b8bf5d63-5181-4546-b5c8-94aaac228b1c","Type":"ContainerStarted","Data":"aa5685bc516d6db550494e4d88ddd8fe48813b31062f4470210e7804ed6c4c11"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.418812 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" event={"ID":"b8bf5d63-5181-4546-b5c8-94aaac228b1c","Type":"ContainerStarted","Data":"430bb8180e977735b3830ae9b80ddb4c04224564e891d55f48ab0ca2914dbb58"} Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.453379 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f4fbff985-ww2n4" podStartSLOduration=8.453363646 podStartE2EDuration="8.453363646s" podCreationTimestamp="2026-01-29 11:01:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:26.447996235 +0000 UTC m=+1183.665327369" watchObservedRunningTime="2026-01-29 11:01:26.453363646 +0000 UTC m=+1183.670694780" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.469093 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-66d6b946b9-8qp8x" podStartSLOduration=4.47997222 podStartE2EDuration="12.469078202s" podCreationTimestamp="2026-01-29 11:01:14 +0000 UTC" firstStartedPulling="2026-01-29 11:01:15.583725872 +0000 UTC m=+1172.801057006" lastFinishedPulling="2026-01-29 11:01:23.572831854 +0000 UTC m=+1180.790162988" observedRunningTime="2026-01-29 11:01:26.464068269 +0000 UTC m=+1183.681399403" watchObservedRunningTime="2026-01-29 11:01:26.469078202 +0000 UTC m=+1183.686409336" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.513954 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-59b785bd69-l6jw6"] Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.533101 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" podStartSLOduration=4.509051003 podStartE2EDuration="12.533083412s" podCreationTimestamp="2026-01-29 11:01:14 +0000 UTC" firstStartedPulling="2026-01-29 11:01:15.577473969 +0000 UTC m=+1172.794805103" lastFinishedPulling="2026-01-29 11:01:23.601506378 +0000 UTC m=+1180.818837512" observedRunningTime="2026-01-29 11:01:26.513490791 +0000 UTC m=+1183.730821925" watchObservedRunningTime="2026-01-29 11:01:26.533083412 +0000 UTC m=+1183.750414546" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.534425 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-6445cccd4b-jdf45"] Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.564228 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-59b785bd69-l6jw6" podStartSLOduration=4.122470185 podStartE2EDuration="13.564205837s" podCreationTimestamp="2026-01-29 11:01:13 +0000 UTC" firstStartedPulling="2026-01-29 11:01:14.966520195 +0000 UTC m=+1172.183851329" lastFinishedPulling="2026-01-29 11:01:24.408255847 +0000 UTC m=+1181.625586981" observedRunningTime="2026-01-29 11:01:26.554422517 +0000 UTC m=+1183.771753651" watchObservedRunningTime="2026-01-29 11:01:26.564205837 +0000 UTC m=+1183.781536971" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.589835 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" podStartSLOduration=5.601179484 podStartE2EDuration="13.589814285s" podCreationTimestamp="2026-01-29 11:01:13 +0000 UTC" firstStartedPulling="2026-01-29 11:01:14.809185524 +0000 UTC m=+1172.026516668" lastFinishedPulling="2026-01-29 11:01:22.797820335 +0000 UTC m=+1180.015151469" observedRunningTime="2026-01-29 11:01:26.586031912 +0000 UTC m=+1183.803363046" watchObservedRunningTime="2026-01-29 11:01:26.589814285 +0000 UTC m=+1183.807145419" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.612887 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-588c766876-422z6" podStartSLOduration=6.61286949 podStartE2EDuration="6.61286949s" podCreationTimestamp="2026-01-29 11:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 
11:01:26.603303216 +0000 UTC m=+1183.820634350" watchObservedRunningTime="2026-01-29 11:01:26.61286949 +0000 UTC m=+1183.830200614" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.731305 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:26 crc kubenswrapper[4852]: I0129 11:01:26.757762 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.429524 4852 generic.go:334] "Generic (PLEG): container finished" podID="f988dca9-b8dd-406e-b316-d27052f43c80" containerID="a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a" exitCode=0 Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.429674 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerDied","Data":"a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a"} Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.432725 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.474293 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" path="/var/lib/kubelet/pods/33e33319-f0fa-4c80-af5f-47a6fc4e7e23/volumes" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.800377 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.893725 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-scripts\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.893958 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88726a03-9dc5-49b5-b4cc-60b521b51d61-etc-machine-id\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.893990 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.894017 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-db-sync-config-data\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.894034 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-config-data\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.894055 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-9nxz5\" (UniqueName: \"kubernetes.io/projected/88726a03-9dc5-49b5-b4cc-60b521b51d61-kube-api-access-9nxz5\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.894512 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/88726a03-9dc5-49b5-b4cc-60b521b51d61-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.900185 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-scripts" (OuterVolumeSpecName: "scripts") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.913972 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88726a03-9dc5-49b5-b4cc-60b521b51d61-kube-api-access-9nxz5" (OuterVolumeSpecName: "kube-api-access-9nxz5") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61"). InnerVolumeSpecName "kube-api-access-9nxz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.915074 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:27 crc kubenswrapper[4852]: E0129 11:01:27.942818 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle podName:88726a03-9dc5-49b5-b4cc-60b521b51d61 nodeName:}" failed. No retries permitted until 2026-01-29 11:01:28.442794498 +0000 UTC m=+1185.660125632 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61") : error deleting /var/lib/kubelet/pods/88726a03-9dc5-49b5-b4cc-60b521b51d61/volume-subpaths: remove /var/lib/kubelet/pods/88726a03-9dc5-49b5-b4cc-60b521b51d61/volume-subpaths: no such file or directory Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.945832 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-config-data" (OuterVolumeSpecName: "config-data") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.996248 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.996279 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88726a03-9dc5-49b5-b4cc-60b521b51d61-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.996292 4852 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.996301 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:27 crc kubenswrapper[4852]: I0129 11:01:27.996309 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nxz5\" (UniqueName: \"kubernetes.io/projected/88726a03-9dc5-49b5-b4cc-60b521b51d61-kube-api-access-9nxz5\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.444600 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9kkxs" event={"ID":"88726a03-9dc5-49b5-b4cc-60b521b51d61","Type":"ContainerDied","Data":"810adc495248c520a873e86ec26e443fb508609ceed54c8e342c7f9b54e48a9e"} Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.444656 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="810adc495248c520a873e86ec26e443fb508609ceed54c8e342c7f9b54e48a9e" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.445116 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-9kkxs" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.445179 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener" containerID="cri-o://863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19" gracePeriod=30 Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.445064 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener-log" containerID="cri-o://ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a" gracePeriod=30 Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.445653 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-59b785bd69-l6jw6" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker-log" containerID="cri-o://aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60" gracePeriod=30 Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.445737 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-59b785bd69-l6jw6" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker" containerID="cri-o://3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c" gracePeriod=30 Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.504633 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle\") pod \"88726a03-9dc5-49b5-b4cc-60b521b51d61\" (UID: \"88726a03-9dc5-49b5-b4cc-60b521b51d61\") " Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.514816 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88726a03-9dc5-49b5-b4cc-60b521b51d61" (UID: "88726a03-9dc5-49b5-b4cc-60b521b51d61"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.609078 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88726a03-9dc5-49b5-b4cc-60b521b51d61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.633821 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:28 crc kubenswrapper[4852]: E0129 11:01:28.634211 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88726a03-9dc5-49b5-b4cc-60b521b51d61" containerName="cinder-db-sync" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634226 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="88726a03-9dc5-49b5-b4cc-60b521b51d61" containerName="cinder-db-sync" Jan 29 11:01:28 crc kubenswrapper[4852]: E0129 11:01:28.634247 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634256 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api" Jan 29 11:01:28 crc kubenswrapper[4852]: E0129 11:01:28.634276 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerName="dnsmasq-dns" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634282 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerName="dnsmasq-dns" Jan 29 11:01:28 crc kubenswrapper[4852]: E0129 11:01:28.634301 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api-log" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634307 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api-log" Jan 29 11:01:28 crc kubenswrapper[4852]: E0129 11:01:28.634316 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerName="init" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634321 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerName="init" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634476 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="33e33319-f0fa-4c80-af5f-47a6fc4e7e23" containerName="dnsmasq-dns" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634491 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="88726a03-9dc5-49b5-b4cc-60b521b51d61" containerName="cinder-db-sync" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634500 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.634512 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="695c2ec0-f202-49bd-828d-e7aafce54ef4" containerName="barbican-api-log" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.635409 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.642767 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.652813 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.734651 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-fb2bm"] Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.736564 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.746576 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-fb2bm"] Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.813252 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5zvw\" (UniqueName: \"kubernetes.io/projected/4ebae001-f26f-4cef-877e-6dcd5d132a4b-kube-api-access-l5zvw\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.813319 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.813351 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.813403 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4ebae001-f26f-4cef-877e-6dcd5d132a4b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.813422 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-scripts\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.813445 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.822470 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.824313 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.833882 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.843949 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.914997 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5zvw\" (UniqueName: \"kubernetes.io/projected/4ebae001-f26f-4cef-877e-6dcd5d132a4b-kube-api-access-l5zvw\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915045 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c374ee3b-421a-497d-9058-bf4f3efaa4fb-logs\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915065 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915112 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915132 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915151 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwx2f\" (UniqueName: \"kubernetes.io/projected/7425bd14-f51f-408c-8fa0-749ce9aa74c7-kube-api-access-zwx2f\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915166 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data-custom\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915183 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgj6p\" (UniqueName: \"kubernetes.io/projected/c374ee3b-421a-497d-9058-bf4f3efaa4fb-kube-api-access-zgj6p\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915204 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915220 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915252 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915272 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-config\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915290 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-scripts\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915308 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4ebae001-f26f-4cef-877e-6dcd5d132a4b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915356 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-scripts\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915374 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915404 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915421 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c374ee3b-421a-497d-9058-bf4f3efaa4fb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.915954 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4ebae001-f26f-4cef-877e-6dcd5d132a4b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.922251 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.922619 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.923070 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.923073 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-scripts\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:28 crc kubenswrapper[4852]: I0129 11:01:28.941626 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5zvw\" (UniqueName: \"kubernetes.io/projected/4ebae001-f26f-4cef-877e-6dcd5d132a4b-kube-api-access-l5zvw\") pod \"cinder-scheduler-0\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.005405 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016524 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c374ee3b-421a-497d-9058-bf4f3efaa4fb-logs\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016598 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016643 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016669 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016699 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwx2f\" (UniqueName: \"kubernetes.io/projected/7425bd14-f51f-408c-8fa0-749ce9aa74c7-kube-api-access-zwx2f\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016721 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data-custom\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016745 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgj6p\" (UniqueName: \"kubernetes.io/projected/c374ee3b-421a-497d-9058-bf4f3efaa4fb-kube-api-access-zgj6p\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016771 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016815 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016850 4852 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-config\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016874 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-scripts\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016931 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.016957 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c374ee3b-421a-497d-9058-bf4f3efaa4fb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.017090 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c374ee3b-421a-497d-9058-bf4f3efaa4fb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.017224 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c374ee3b-421a-497d-9058-bf4f3efaa4fb-logs\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.018096 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.018260 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.018275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-config\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.018401 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 
11:01:29.020753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-scripts\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.021054 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.021392 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.022067 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.023614 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data-custom\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.036377 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgj6p\" (UniqueName: \"kubernetes.io/projected/c374ee3b-421a-497d-9058-bf4f3efaa4fb-kube-api-access-zgj6p\") pod \"cinder-api-0\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.036458 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwx2f\" (UniqueName: \"kubernetes.io/projected/7425bd14-f51f-408c-8fa0-749ce9aa74c7-kube-api-access-zwx2f\") pod \"dnsmasq-dns-6bb4fc677f-fb2bm\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.068185 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.157696 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.476760 4852 generic.go:334] "Generic (PLEG): container finished" podID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerID="3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c" exitCode=0 Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.476800 4852 generic.go:334] "Generic (PLEG): container finished" podID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerID="aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60" exitCode=143 Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.479329 4852 generic.go:334] "Generic (PLEG): container finished" podID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerID="ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a" exitCode=143 Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.481819 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.487033 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59b785bd69-l6jw6" event={"ID":"0e78d444-dc43-4deb-9c46-a027c64b7ed5","Type":"ContainerDied","Data":"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c"} Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.487118 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59b785bd69-l6jw6" event={"ID":"0e78d444-dc43-4deb-9c46-a027c64b7ed5","Type":"ContainerDied","Data":"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60"} Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.487131 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59b785bd69-l6jw6" event={"ID":"0e78d444-dc43-4deb-9c46-a027c64b7ed5","Type":"ContainerDied","Data":"92f0d700522fa0f0d50e01f618b4306f640669a29fe1266e7297d9c5f90faaf8"} Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.487144 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" event={"ID":"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191","Type":"ContainerDied","Data":"ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a"} Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.487168 4852 scope.go:117] "RemoveContainer" containerID="3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.536286 4852 scope.go:117] "RemoveContainer" containerID="aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.635185 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-combined-ca-bundle\") pod \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.635289 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlds4\" (UniqueName: \"kubernetes.io/projected/0e78d444-dc43-4deb-9c46-a027c64b7ed5-kube-api-access-xlds4\") pod \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.635370 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data-custom\") pod \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.635527 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data\") pod \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.635617 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e78d444-dc43-4deb-9c46-a027c64b7ed5-logs\") pod \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\" (UID: \"0e78d444-dc43-4deb-9c46-a027c64b7ed5\") " Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.647769 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e78d444-dc43-4deb-9c46-a027c64b7ed5-logs" (OuterVolumeSpecName: "logs") pod "0e78d444-dc43-4deb-9c46-a027c64b7ed5" (UID: "0e78d444-dc43-4deb-9c46-a027c64b7ed5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.656384 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0e78d444-dc43-4deb-9c46-a027c64b7ed5" (UID: "0e78d444-dc43-4deb-9c46-a027c64b7ed5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.686901 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e78d444-dc43-4deb-9c46-a027c64b7ed5-kube-api-access-xlds4" (OuterVolumeSpecName: "kube-api-access-xlds4") pod "0e78d444-dc43-4deb-9c46-a027c64b7ed5" (UID: "0e78d444-dc43-4deb-9c46-a027c64b7ed5"). InnerVolumeSpecName "kube-api-access-xlds4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.707671 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-fb2bm"] Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.746322 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.754977 4852 scope.go:117] "RemoveContainer" containerID="3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.760189 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e78d444-dc43-4deb-9c46-a027c64b7ed5-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.760221 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlds4\" (UniqueName: \"kubernetes.io/projected/0e78d444-dc43-4deb-9c46-a027c64b7ed5-kube-api-access-xlds4\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.760232 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.766058 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e78d444-dc43-4deb-9c46-a027c64b7ed5" (UID: "0e78d444-dc43-4deb-9c46-a027c64b7ed5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:29 crc kubenswrapper[4852]: E0129 11:01:29.767049 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c\": container with ID starting with 3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c not found: ID does not exist" containerID="3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.767092 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c"} err="failed to get container status \"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c\": rpc error: code = NotFound desc = could not find container \"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c\": container with ID starting with 3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c not found: ID does not exist" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.767117 4852 scope.go:117] "RemoveContainer" containerID="aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60" Jan 29 11:01:29 crc kubenswrapper[4852]: E0129 11:01:29.769763 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60\": container with ID starting with aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60 not found: ID does not exist" containerID="aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 
11:01:29.769804 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60"} err="failed to get container status \"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60\": rpc error: code = NotFound desc = could not find container \"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60\": container with ID starting with aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60 not found: ID does not exist" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.769830 4852 scope.go:117] "RemoveContainer" containerID="3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.775823 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data" (OuterVolumeSpecName: "config-data") pod "0e78d444-dc43-4deb-9c46-a027c64b7ed5" (UID: "0e78d444-dc43-4deb-9c46-a027c64b7ed5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.780819 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c"} err="failed to get container status \"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c\": rpc error: code = NotFound desc = could not find container \"3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c\": container with ID starting with 3efad55bbc18ac3dcf9ce382d7e3a5507284c75cb076ae717c14d17cad5eaf5c not found: ID does not exist" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.780866 4852 scope.go:117] "RemoveContainer" containerID="aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.790635 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60"} err="failed to get container status \"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60\": rpc error: code = NotFound desc = could not find container \"aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60\": container with ID starting with aeea8d5554cd6e73da177251c261ee2837cc422cfc410f5148afdf00fb093f60 not found: ID does not exist" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.861918 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.862177 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e78d444-dc43-4deb-9c46-a027c64b7ed5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:29 crc kubenswrapper[4852]: I0129 11:01:29.866279 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:29 crc kubenswrapper[4852]: W0129 11:01:29.871386 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc374ee3b_421a_497d_9058_bf4f3efaa4fb.slice/crio-e0f01ba6a80e5a2a7b1f8e5544131994d51b6c4ae8587d8e12050d15f660fed9 WatchSource:0}: Error finding container 
e0f01ba6a80e5a2a7b1f8e5544131994d51b6c4ae8587d8e12050d15f660fed9: Status 404 returned error can't find the container with id e0f01ba6a80e5a2a7b1f8e5544131994d51b6c4ae8587d8e12050d15f660fed9 Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.152246 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.273116 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drsjv\" (UniqueName: \"kubernetes.io/projected/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-kube-api-access-drsjv\") pod \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.273198 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-logs\") pod \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.273233 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data\") pod \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.273356 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-combined-ca-bundle\") pod \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.273538 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data-custom\") pod \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\" (UID: \"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191\") " Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.273606 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-logs" (OuterVolumeSpecName: "logs") pod "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" (UID: "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.274011 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.284640 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" (UID: "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.303866 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-kube-api-access-drsjv" (OuterVolumeSpecName: "kube-api-access-drsjv") pod "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" (UID: "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191"). InnerVolumeSpecName "kube-api-access-drsjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.347661 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" (UID: "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.369321 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data" (OuterVolumeSpecName: "config-data") pod "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" (UID: "ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.378826 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.378877 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.378890 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drsjv\" (UniqueName: \"kubernetes.io/projected/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-kube-api-access-drsjv\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.378901 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.502412 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4ebae001-f26f-4cef-877e-6dcd5d132a4b","Type":"ContainerStarted","Data":"e784083fa0349a7eaf65fe1c5b588951ddc4175bafe999e86648cd887e4d22d1"} Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.503595 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-59b785bd69-l6jw6" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.504270 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c374ee3b-421a-497d-9058-bf4f3efaa4fb","Type":"ContainerStarted","Data":"e0f01ba6a80e5a2a7b1f8e5544131994d51b6c4ae8587d8e12050d15f660fed9"} Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.508424 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" event={"ID":"7425bd14-f51f-408c-8fa0-749ce9aa74c7","Type":"ContainerDied","Data":"e4bd4060c81becf02dfdec18bec7bb87edd2e53e73362e58cbe011ad2505c1cd"} Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.513752 4852 generic.go:334] "Generic (PLEG): container finished" podID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerID="e4bd4060c81becf02dfdec18bec7bb87edd2e53e73362e58cbe011ad2505c1cd" exitCode=0 Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.513917 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" event={"ID":"7425bd14-f51f-408c-8fa0-749ce9aa74c7","Type":"ContainerStarted","Data":"d70f315c3fb8e255c5172504d9903fa86c29aaa900f550623954272bafb1ca81"} Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.517313 4852 generic.go:334] "Generic (PLEG): container finished" podID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerID="863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19" exitCode=0 Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.517359 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" event={"ID":"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191","Type":"ContainerDied","Data":"863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19"} Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.517390 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" event={"ID":"ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191","Type":"ContainerDied","Data":"39b5a13133b5b116f18c46ae0f38098ea35c205ba5255b2c3b88fea53bd51081"} Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.517406 4852 scope.go:117] "RemoveContainer" containerID="863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.517539 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6445cccd4b-jdf45" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.558722 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-59b785bd69-l6jw6"] Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.569340 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-59b785bd69-l6jw6"] Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.589545 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-6445cccd4b-jdf45"] Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.600175 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-6445cccd4b-jdf45"] Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.669699 4852 scope.go:117] "RemoveContainer" containerID="ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.732618 4852 scope.go:117] "RemoveContainer" containerID="863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19" Jan 29 11:01:30 crc kubenswrapper[4852]: E0129 11:01:30.732931 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19\": container with ID starting with 863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19 not found: ID does not exist" containerID="863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.732958 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19"} err="failed to get container status \"863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19\": rpc error: code = NotFound desc = could not find container \"863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19\": container with ID starting with 863129f2cf95e3588b560305bbc3b9c4f012c0a22802de52bad61bc43bea2a19 not found: ID does not exist" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.732976 4852 scope.go:117] "RemoveContainer" containerID="ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a" Jan 29 11:01:30 crc kubenswrapper[4852]: E0129 11:01:30.733202 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a\": container with ID starting with ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a not found: ID does not exist" containerID="ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a" Jan 29 11:01:30 crc kubenswrapper[4852]: I0129 11:01:30.733223 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a"} err="failed to get container status \"ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a\": rpc error: code = NotFound desc = could not find container \"ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a\": container with ID starting with ca5699c8e884f351850eebb024d67d4417cb615c4310941a596ac7ad27d8576a not found: ID does not exist" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.480569 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" path="/var/lib/kubelet/pods/0e78d444-dc43-4deb-9c46-a027c64b7ed5/volumes" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.483209 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" path="/var/lib/kubelet/pods/ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191/volumes" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.540759 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c374ee3b-421a-497d-9058-bf4f3efaa4fb","Type":"ContainerStarted","Data":"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89"} Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.540805 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c374ee3b-421a-497d-9058-bf4f3efaa4fb","Type":"ContainerStarted","Data":"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d"} Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.541225 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.564323 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" event={"ID":"7425bd14-f51f-408c-8fa0-749ce9aa74c7","Type":"ContainerStarted","Data":"6b28eeca95cbe08e4a5ad1c4feaf4c2345179dcc8ef8abd167dfda1f5b0122b0"} Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.565046 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.569891 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.569873552 podStartE2EDuration="3.569873552s" podCreationTimestamp="2026-01-29 11:01:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:31.564555991 +0000 UTC m=+1188.781887145" watchObservedRunningTime="2026-01-29 11:01:31.569873552 +0000 UTC m=+1188.787204686" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.586049 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4ebae001-f26f-4cef-877e-6dcd5d132a4b","Type":"ContainerStarted","Data":"e1160aa880ddcc6c70cca5bef024fc9b1f5e981934d831ff90db68fc42515574"} Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.595702 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" podStartSLOduration=3.595684655 podStartE2EDuration="3.595684655s" podCreationTimestamp="2026-01-29 11:01:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:31.584297605 +0000 UTC m=+1188.801628749" watchObservedRunningTime="2026-01-29 11:01:31.595684655 +0000 UTC m=+1188.813015789" Jan 29 11:01:31 crc kubenswrapper[4852]: I0129 11:01:31.662669 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:32 crc kubenswrapper[4852]: I0129 11:01:32.599390 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4ebae001-f26f-4cef-877e-6dcd5d132a4b","Type":"ContainerStarted","Data":"55dec1754cc3bbcae819eadf58bc9b76ae89286112e9026cc24bf317a6c1c35c"} Jan 29 11:01:32 crc 
kubenswrapper[4852]: I0129 11:01:32.626948 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.666936345 podStartE2EDuration="4.626924083s" podCreationTimestamp="2026-01-29 11:01:28 +0000 UTC" firstStartedPulling="2026-01-29 11:01:29.770830401 +0000 UTC m=+1186.988161535" lastFinishedPulling="2026-01-29 11:01:30.730818139 +0000 UTC m=+1187.948149273" observedRunningTime="2026-01-29 11:01:32.623108339 +0000 UTC m=+1189.840439483" watchObservedRunningTime="2026-01-29 11:01:32.626924083 +0000 UTC m=+1189.844255227" Jan 29 11:01:32 crc kubenswrapper[4852]: I0129 11:01:32.727414 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:32 crc kubenswrapper[4852]: I0129 11:01:32.763171 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:01:32 crc kubenswrapper[4852]: I0129 11:01:32.821043 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5b854ff6b8-lksr4"] Jan 29 11:01:32 crc kubenswrapper[4852]: I0129 11:01:32.821421 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5b854ff6b8-lksr4" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api" containerID="cri-o://a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79" gracePeriod=30 Jan 29 11:01:32 crc kubenswrapper[4852]: I0129 11:01:32.821362 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5b854ff6b8-lksr4" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api-log" containerID="cri-o://b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb" gracePeriod=30 Jan 29 11:01:33 crc kubenswrapper[4852]: I0129 11:01:33.611803 4852 generic.go:334] "Generic (PLEG): container finished" podID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerID="b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb" exitCode=143 Jan 29 11:01:33 crc kubenswrapper[4852]: I0129 11:01:33.611897 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b854ff6b8-lksr4" event={"ID":"23ac92d2-cb89-4381-84cd-1f6757afc78b","Type":"ContainerDied","Data":"b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb"} Jan 29 11:01:33 crc kubenswrapper[4852]: I0129 11:01:33.612700 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api-log" containerID="cri-o://cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d" gracePeriod=30 Jan 29 11:01:33 crc kubenswrapper[4852]: I0129 11:01:33.612718 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api" containerID="cri-o://88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89" gracePeriod=30 Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.006905 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.245801 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390213 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390295 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-combined-ca-bundle\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390331 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c374ee3b-421a-497d-9058-bf4f3efaa4fb-logs\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390379 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data-custom\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390415 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-scripts\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390444 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c374ee3b-421a-497d-9058-bf4f3efaa4fb-etc-machine-id\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390482 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgj6p\" (UniqueName: \"kubernetes.io/projected/c374ee3b-421a-497d-9058-bf4f3efaa4fb-kube-api-access-zgj6p\") pod \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\" (UID: \"c374ee3b-421a-497d-9058-bf4f3efaa4fb\") " Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.390843 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c374ee3b-421a-497d-9058-bf4f3efaa4fb-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.391114 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c374ee3b-421a-497d-9058-bf4f3efaa4fb-logs" (OuterVolumeSpecName: "logs") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.397221 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.398289 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-scripts" (OuterVolumeSpecName: "scripts") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.411961 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c374ee3b-421a-497d-9058-bf4f3efaa4fb-kube-api-access-zgj6p" (OuterVolumeSpecName: "kube-api-access-zgj6p") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "kube-api-access-zgj6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.415890 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.445025 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data" (OuterVolumeSpecName: "config-data") pod "c374ee3b-421a-497d-9058-bf4f3efaa4fb" (UID: "c374ee3b-421a-497d-9058-bf4f3efaa4fb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492479 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c374ee3b-421a-497d-9058-bf4f3efaa4fb-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492541 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgj6p\" (UniqueName: \"kubernetes.io/projected/c374ee3b-421a-497d-9058-bf4f3efaa4fb-kube-api-access-zgj6p\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492558 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492570 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492621 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c374ee3b-421a-497d-9058-bf4f3efaa4fb-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492633 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.492644 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c374ee3b-421a-497d-9058-bf4f3efaa4fb-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.631687 4852 generic.go:334] "Generic (PLEG): container finished" podID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerID="88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89" exitCode=0 Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.631719 4852 generic.go:334] "Generic (PLEG): container finished" podID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerID="cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d" exitCode=143 Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.632095 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.632794 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c374ee3b-421a-497d-9058-bf4f3efaa4fb","Type":"ContainerDied","Data":"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89"} Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.632821 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c374ee3b-421a-497d-9058-bf4f3efaa4fb","Type":"ContainerDied","Data":"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d"} Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.632833 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c374ee3b-421a-497d-9058-bf4f3efaa4fb","Type":"ContainerDied","Data":"e0f01ba6a80e5a2a7b1f8e5544131994d51b6c4ae8587d8e12050d15f660fed9"} Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.632847 4852 scope.go:117] "RemoveContainer" containerID="88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.677811 4852 scope.go:117] "RemoveContainer" containerID="cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.691627 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.711111 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.720602 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.721125 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721137 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.721157 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721163 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker-log" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.721190 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721196 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.721214 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721222 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener-log" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.721246 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" 
containerName="barbican-worker" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721252 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.721270 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721276 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721598 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721612 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721631 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721652 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" containerName="cinder-api-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721669 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e78d444-dc43-4deb-9c46-a027c64b7ed5" containerName="barbican-worker" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.721684 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba600c50-3c4a-4ab4-b3bd-2cb5d8cac191" containerName="barbican-keystone-listener-log" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.723119 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.725152 4852 scope.go:117] "RemoveContainer" containerID="88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.729261 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89\": container with ID starting with 88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89 not found: ID does not exist" containerID="88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.729306 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89"} err="failed to get container status \"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89\": rpc error: code = NotFound desc = could not find container \"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89\": container with ID starting with 88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89 not found: ID does not exist" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.729332 4852 scope.go:117] "RemoveContainer" containerID="cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.729627 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.729851 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.730279 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 11:01:34 crc kubenswrapper[4852]: E0129 11:01:34.731878 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d\": container with ID starting with cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d not found: ID does not exist" containerID="cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.731930 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d"} err="failed to get container status \"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d\": rpc error: code = NotFound desc = could not find container \"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d\": container with ID starting with cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d not found: ID does not exist" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.731961 4852 scope.go:117] "RemoveContainer" containerID="88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.732276 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89"} err="failed to get container status \"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89\": rpc 
error: code = NotFound desc = could not find container \"88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89\": container with ID starting with 88d0ce37cb1994b760210f64aa5a648247e45cbc2056f5053cb195d9da9a2f89 not found: ID does not exist" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.732303 4852 scope.go:117] "RemoveContainer" containerID="cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.732617 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d"} err="failed to get container status \"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d\": rpc error: code = NotFound desc = could not find container \"cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d\": container with ID starting with cca158a9ebb7652b5ddd832294449b0f3c9ab0ada6426b123fbed923be3d371d not found: ID does not exist" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.745685 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.904191 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.904291 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln67j\" (UniqueName: \"kubernetes.io/projected/e967d95c-8de4-4167-82ef-1b32f6026476-kube-api-access-ln67j\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905131 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data-custom\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905184 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905307 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905388 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-scripts\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905510 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e967d95c-8de4-4167-82ef-1b32f6026476-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905552 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e967d95c-8de4-4167-82ef-1b32f6026476-logs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:34 crc kubenswrapper[4852]: I0129 11:01:34.905598 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007321 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007414 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-scripts\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007500 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e967d95c-8de4-4167-82ef-1b32f6026476-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007521 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e967d95c-8de4-4167-82ef-1b32f6026476-logs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007542 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007640 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007696 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e967d95c-8de4-4167-82ef-1b32f6026476-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007709 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln67j\" (UniqueName: \"kubernetes.io/projected/e967d95c-8de4-4167-82ef-1b32f6026476-kube-api-access-ln67j\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007812 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data-custom\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.007872 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.008300 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e967d95c-8de4-4167-82ef-1b32f6026476-logs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.013445 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.014722 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.014774 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-scripts\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.016075 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.017282 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.017654 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data-custom\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.037103 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ln67j\" (UniqueName: \"kubernetes.io/projected/e967d95c-8de4-4167-82ef-1b32f6026476-kube-api-access-ln67j\") pod \"cinder-api-0\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.060693 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.481362 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c374ee3b-421a-497d-9058-bf4f3efaa4fb" path="/var/lib/kubelet/pods/c374ee3b-421a-497d-9058-bf4f3efaa4fb/volumes" Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.600629 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:01:35 crc kubenswrapper[4852]: W0129 11:01:35.604406 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode967d95c_8de4_4167_82ef_1b32f6026476.slice/crio-e8afd6b1f7b101e5b00004ac0fadc68cd2f4847b91fa7a3b8d797c3ad5375834 WatchSource:0}: Error finding container e8afd6b1f7b101e5b00004ac0fadc68cd2f4847b91fa7a3b8d797c3ad5375834: Status 404 returned error can't find the container with id e8afd6b1f7b101e5b00004ac0fadc68cd2f4847b91fa7a3b8d797c3ad5375834 Jan 29 11:01:35 crc kubenswrapper[4852]: I0129 11:01:35.645815 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e967d95c-8de4-4167-82ef-1b32f6026476","Type":"ContainerStarted","Data":"e8afd6b1f7b101e5b00004ac0fadc68cd2f4847b91fa7a3b8d797c3ad5375834"} Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.006342 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5b854ff6b8-lksr4" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.165:9311/healthcheck\": read tcp 10.217.0.2:50532->10.217.0.165:9311: read: connection reset by peer" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.006408 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5b854ff6b8-lksr4" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.165:9311/healthcheck\": read tcp 10.217.0.2:50536->10.217.0.165:9311: read: connection reset by peer" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.403653 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.533881 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r4zd\" (UniqueName: \"kubernetes.io/projected/23ac92d2-cb89-4381-84cd-1f6757afc78b-kube-api-access-5r4zd\") pod \"23ac92d2-cb89-4381-84cd-1f6757afc78b\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.534695 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data\") pod \"23ac92d2-cb89-4381-84cd-1f6757afc78b\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.534819 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-combined-ca-bundle\") pod \"23ac92d2-cb89-4381-84cd-1f6757afc78b\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.534890 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23ac92d2-cb89-4381-84cd-1f6757afc78b-logs\") pod \"23ac92d2-cb89-4381-84cd-1f6757afc78b\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.535106 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data-custom\") pod \"23ac92d2-cb89-4381-84cd-1f6757afc78b\" (UID: \"23ac92d2-cb89-4381-84cd-1f6757afc78b\") " Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.537269 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23ac92d2-cb89-4381-84cd-1f6757afc78b-logs" (OuterVolumeSpecName: "logs") pod "23ac92d2-cb89-4381-84cd-1f6757afc78b" (UID: "23ac92d2-cb89-4381-84cd-1f6757afc78b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.538974 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23ac92d2-cb89-4381-84cd-1f6757afc78b-kube-api-access-5r4zd" (OuterVolumeSpecName: "kube-api-access-5r4zd") pod "23ac92d2-cb89-4381-84cd-1f6757afc78b" (UID: "23ac92d2-cb89-4381-84cd-1f6757afc78b"). InnerVolumeSpecName "kube-api-access-5r4zd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.549801 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "23ac92d2-cb89-4381-84cd-1f6757afc78b" (UID: "23ac92d2-cb89-4381-84cd-1f6757afc78b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.579810 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23ac92d2-cb89-4381-84cd-1f6757afc78b" (UID: "23ac92d2-cb89-4381-84cd-1f6757afc78b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.614731 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data" (OuterVolumeSpecName: "config-data") pod "23ac92d2-cb89-4381-84cd-1f6757afc78b" (UID: "23ac92d2-cb89-4381-84cd-1f6757afc78b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.637033 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.637072 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r4zd\" (UniqueName: \"kubernetes.io/projected/23ac92d2-cb89-4381-84cd-1f6757afc78b-kube-api-access-5r4zd\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.637090 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.637101 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ac92d2-cb89-4381-84cd-1f6757afc78b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.637110 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23ac92d2-cb89-4381-84cd-1f6757afc78b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.668150 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e967d95c-8de4-4167-82ef-1b32f6026476","Type":"ContainerStarted","Data":"5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351"} Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.671711 4852 generic.go:334] "Generic (PLEG): container finished" podID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerID="a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79" exitCode=0 Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.671740 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b854ff6b8-lksr4" event={"ID":"23ac92d2-cb89-4381-84cd-1f6757afc78b","Type":"ContainerDied","Data":"a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79"} Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.671761 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5b854ff6b8-lksr4" event={"ID":"23ac92d2-cb89-4381-84cd-1f6757afc78b","Type":"ContainerDied","Data":"640439362f7959e63be798f2dcb25045fa0da8834d03674f2b2f137fc46e10ba"} Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.671779 4852 scope.go:117] "RemoveContainer" containerID="a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.671785 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5b854ff6b8-lksr4" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.717247 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5b854ff6b8-lksr4"] Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.733596 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5b854ff6b8-lksr4"] Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.746778 4852 scope.go:117] "RemoveContainer" containerID="b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.769107 4852 scope.go:117] "RemoveContainer" containerID="a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79" Jan 29 11:01:36 crc kubenswrapper[4852]: E0129 11:01:36.771322 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79\": container with ID starting with a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79 not found: ID does not exist" containerID="a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.771353 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79"} err="failed to get container status \"a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79\": rpc error: code = NotFound desc = could not find container \"a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79\": container with ID starting with a9b592ccfa61b9a8200450524efe90c297c4f352e6424a3ac43d9eaadad48e79 not found: ID does not exist" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.771375 4852 scope.go:117] "RemoveContainer" containerID="b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb" Jan 29 11:01:36 crc kubenswrapper[4852]: E0129 11:01:36.771722 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb\": container with ID starting with b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb not found: ID does not exist" containerID="b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb" Jan 29 11:01:36 crc kubenswrapper[4852]: I0129 11:01:36.771748 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb"} err="failed to get container status \"b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb\": rpc error: code = NotFound desc = could not find container \"b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb\": container with ID starting with b2c60f1e1c8634bd507d7415a931e1229e74565cba562bb657a1485efea8becb not found: ID does not exist" Jan 29 11:01:37 crc kubenswrapper[4852]: I0129 11:01:37.475533 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" path="/var/lib/kubelet/pods/23ac92d2-cb89-4381-84cd-1f6757afc78b/volumes" Jan 29 11:01:37 crc kubenswrapper[4852]: I0129 11:01:37.683444 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"e967d95c-8de4-4167-82ef-1b32f6026476","Type":"ContainerStarted","Data":"f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983"} Jan 29 11:01:37 crc kubenswrapper[4852]: I0129 11:01:37.685477 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 11:01:37 crc kubenswrapper[4852]: I0129 11:01:37.713694 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.713674128 podStartE2EDuration="3.713674128s" podCreationTimestamp="2026-01-29 11:01:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:37.711992426 +0000 UTC m=+1194.929323560" watchObservedRunningTime="2026-01-29 11:01:37.713674128 +0000 UTC m=+1194.931005262" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.071915 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.126208 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-vdgvd"] Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.129564 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="dnsmasq-dns" containerID="cri-o://77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b" gracePeriod=10 Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.278425 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.351704 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.642953 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.715540 4852 generic.go:334] "Generic (PLEG): container finished" podID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerID="77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b" exitCode=0 Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.715636 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.715638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" event={"ID":"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8","Type":"ContainerDied","Data":"77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b"} Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.716021 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" event={"ID":"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8","Type":"ContainerDied","Data":"3d8e5a691339e6915a41eb92500d67bee415764f21a1b5761fa625bb840471bb"} Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.716060 4852 scope.go:117] "RemoveContainer" containerID="77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.716596 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="cinder-scheduler" containerID="cri-o://e1160aa880ddcc6c70cca5bef024fc9b1f5e981934d831ff90db68fc42515574" gracePeriod=30 Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.716605 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="probe" containerID="cri-o://55dec1754cc3bbcae819eadf58bc9b76ae89286112e9026cc24bf317a6c1c35c" gracePeriod=30 Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.750687 4852 scope.go:117] "RemoveContainer" containerID="ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.769497 4852 scope.go:117] "RemoveContainer" containerID="77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b" Jan 29 11:01:39 crc kubenswrapper[4852]: E0129 11:01:39.769894 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b\": container with ID starting with 77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b not found: ID does not exist" containerID="77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.770002 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b"} err="failed to get container status \"77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b\": rpc error: code = NotFound desc = could not find container \"77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b\": container with ID starting with 77d6653a2b408788afd9552572f3d5201a1836f2f680bbc1e31d8b102a7d149b not found: ID does not exist" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.770078 4852 scope.go:117] "RemoveContainer" containerID="ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0" Jan 29 11:01:39 crc kubenswrapper[4852]: E0129 11:01:39.771545 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0\": container with ID starting with ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0 not found: ID does not exist" 
containerID="ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.771566 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0"} err="failed to get container status \"ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0\": rpc error: code = NotFound desc = could not find container \"ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0\": container with ID starting with ca85d043e8445f570e82838af92d3ecab00d50cd1e2e29935840a154d46da4f0 not found: ID does not exist" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.809430 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-svc\") pod \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.809548 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-swift-storage-0\") pod \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.809653 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-config\") pod \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.809686 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-nb\") pod \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.809834 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhv4g\" (UniqueName: \"kubernetes.io/projected/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-kube-api-access-xhv4g\") pod \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.809892 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-sb\") pod \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\" (UID: \"b6c46b43-1c5f-4ed7-a286-757b9a5e73e8\") " Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.815568 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-kube-api-access-xhv4g" (OuterVolumeSpecName: "kube-api-access-xhv4g") pod "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" (UID: "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8"). InnerVolumeSpecName "kube-api-access-xhv4g". 
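[editor's note] The "DeleteContainer returned error ... NotFound" pairs above are benign: after removing a container, the kubelet asks CRI-O for its status one more time and the runtime answers with a gRPC NotFound ("ID does not exist"). A minimal Go sketch of how a caller might classify that error follows; it is illustrative only, not kubelet source, and the truncated container ID string is sample text rather than a real ID from this log.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// alreadyGone reports whether a CRI error means the container no longer
// exists, the same condition the kubelet entries above record as benign.
func alreadyGone(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.NotFound
}

func main() {
	// Simulated runtime response mirroring the NotFound error text in the log.
	err := status.Error(codes.NotFound, `could not find container "ca85d043..."`)
	if alreadyGone(err) {
		fmt.Println("container already removed; safe to ignore")
	}
}
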
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.860055 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-config" (OuterVolumeSpecName: "config") pod "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" (UID: "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.860136 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" (UID: "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.861521 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" (UID: "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.864651 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" (UID: "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.876923 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" (UID: "b6c46b43-1c5f-4ed7-a286-757b9a5e73e8"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.912399 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.912430 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.912463 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhv4g\" (UniqueName: \"kubernetes.io/projected/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-kube-api-access-xhv4g\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.912487 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.912498 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:39 crc kubenswrapper[4852]: I0129 11:01:39.912509 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:40 crc kubenswrapper[4852]: I0129 11:01:40.048088 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-vdgvd"] Jan 29 11:01:40 crc kubenswrapper[4852]: I0129 11:01:40.057523 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-vdgvd"] Jan 29 11:01:40 crc kubenswrapper[4852]: I0129 11:01:40.529091 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 29 11:01:40 crc kubenswrapper[4852]: I0129 11:01:40.728597 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerID="55dec1754cc3bbcae819eadf58bc9b76ae89286112e9026cc24bf317a6c1c35c" exitCode=0 Jan 29 11:01:40 crc kubenswrapper[4852]: I0129 11:01:40.728690 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4ebae001-f26f-4cef-877e-6dcd5d132a4b","Type":"ContainerDied","Data":"55dec1754cc3bbcae819eadf58bc9b76ae89286112e9026cc24bf317a6c1c35c"} Jan 29 11:01:41 crc kubenswrapper[4852]: I0129 11:01:41.478828 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" path="/var/lib/kubelet/pods/b6c46b43-1c5f-4ed7-a286-757b9a5e73e8/volumes" Jan 29 11:01:42 crc kubenswrapper[4852]: I0129 11:01:42.751251 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerID="e1160aa880ddcc6c70cca5bef024fc9b1f5e981934d831ff90db68fc42515574" exitCode=0 Jan 29 11:01:42 crc kubenswrapper[4852]: I0129 11:01:42.751346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"4ebae001-f26f-4cef-877e-6dcd5d132a4b","Type":"ContainerDied","Data":"e1160aa880ddcc6c70cca5bef024fc9b1f5e981934d831ff90db68fc42515574"} Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.178997 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.266764 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-combined-ca-bundle\") pod \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.266846 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5zvw\" (UniqueName: \"kubernetes.io/projected/4ebae001-f26f-4cef-877e-6dcd5d132a4b-kube-api-access-l5zvw\") pod \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.266889 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-scripts\") pod \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.266933 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data\") pod \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.267244 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data-custom\") pod \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.267285 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4ebae001-f26f-4cef-877e-6dcd5d132a4b-etc-machine-id\") pod \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\" (UID: \"4ebae001-f26f-4cef-877e-6dcd5d132a4b\") " Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.267677 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4ebae001-f26f-4cef-877e-6dcd5d132a4b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4ebae001-f26f-4cef-877e-6dcd5d132a4b" (UID: "4ebae001-f26f-4cef-877e-6dcd5d132a4b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.268156 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4ebae001-f26f-4cef-877e-6dcd5d132a4b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.272248 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-scripts" (OuterVolumeSpecName: "scripts") pod "4ebae001-f26f-4cef-877e-6dcd5d132a4b" (UID: "4ebae001-f26f-4cef-877e-6dcd5d132a4b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.272287 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ebae001-f26f-4cef-877e-6dcd5d132a4b-kube-api-access-l5zvw" (OuterVolumeSpecName: "kube-api-access-l5zvw") pod "4ebae001-f26f-4cef-877e-6dcd5d132a4b" (UID: "4ebae001-f26f-4cef-877e-6dcd5d132a4b"). InnerVolumeSpecName "kube-api-access-l5zvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.272723 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4ebae001-f26f-4cef-877e-6dcd5d132a4b" (UID: "4ebae001-f26f-4cef-877e-6dcd5d132a4b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.328134 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ebae001-f26f-4cef-877e-6dcd5d132a4b" (UID: "4ebae001-f26f-4cef-877e-6dcd5d132a4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.369904 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.369996 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5zvw\" (UniqueName: \"kubernetes.io/projected/4ebae001-f26f-4cef-877e-6dcd5d132a4b-kube-api-access-l5zvw\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.370012 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.370025 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.381723 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data" (OuterVolumeSpecName: "config-data") pod "4ebae001-f26f-4cef-877e-6dcd5d132a4b" (UID: "4ebae001-f26f-4cef-877e-6dcd5d132a4b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.471198 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ebae001-f26f-4cef-877e-6dcd5d132a4b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.762177 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4ebae001-f26f-4cef-877e-6dcd5d132a4b","Type":"ContainerDied","Data":"e784083fa0349a7eaf65fe1c5b588951ddc4175bafe999e86648cd887e4d22d1"} Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.762661 4852 scope.go:117] "RemoveContainer" containerID="55dec1754cc3bbcae819eadf58bc9b76ae89286112e9026cc24bf317a6c1c35c" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.762686 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.799952 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.800832 4852 scope.go:117] "RemoveContainer" containerID="e1160aa880ddcc6c70cca5bef024fc9b1f5e981934d831ff90db68fc42515574" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.816061 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827266 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:43 crc kubenswrapper[4852]: E0129 11:01:43.827786 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api-log" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827815 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api-log" Jan 29 11:01:43 crc kubenswrapper[4852]: E0129 11:01:43.827841 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="dnsmasq-dns" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827851 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="dnsmasq-dns" Jan 29 11:01:43 crc kubenswrapper[4852]: E0129 11:01:43.827869 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827877 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api" Jan 29 11:01:43 crc kubenswrapper[4852]: E0129 11:01:43.827895 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="init" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827903 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="init" Jan 29 11:01:43 crc kubenswrapper[4852]: E0129 11:01:43.827929 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="probe" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827937 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="probe" Jan 29 
11:01:43 crc kubenswrapper[4852]: E0129 11:01:43.827952 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="cinder-scheduler" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.827960 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="cinder-scheduler" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.828166 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api-log" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.828192 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="dnsmasq-dns" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.828205 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="probe" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.828219 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" containerName="cinder-scheduler" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.828233 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="23ac92d2-cb89-4381-84cd-1f6757afc78b" containerName="barbican-api" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.829434 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.833879 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.846803 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.992501 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.992937 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.993000 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbslw\" (UniqueName: \"kubernetes.io/projected/b7fa8168-5031-44e4-9e06-03b2cda941f5-kube-api-access-vbslw\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.993071 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b7fa8168-5031-44e4-9e06-03b2cda941f5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.993105 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:43 crc kubenswrapper[4852]: I0129 11:01:43.993164 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-scripts\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.095120 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b7fa8168-5031-44e4-9e06-03b2cda941f5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.095197 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b7fa8168-5031-44e4-9e06-03b2cda941f5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.095475 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.095775 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-scripts\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.096081 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.096365 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.096430 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbslw\" (UniqueName: \"kubernetes.io/projected/b7fa8168-5031-44e4-9e06-03b2cda941f5-kube-api-access-vbslw\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.100753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-scripts\") pod \"cinder-scheduler-0\" (UID: 
\"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.101704 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.101944 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.119977 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.129177 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbslw\" (UniqueName: \"kubernetes.io/projected/b7fa8168-5031-44e4-9e06-03b2cda941f5-kube-api-access-vbslw\") pod \"cinder-scheduler-0\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.149120 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.572398 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-688c87cc99-vdgvd" podUID="b6c46b43-1c5f-4ed7-a286-757b9a5e73e8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.160:5353: i/o timeout" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.574280 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.594116 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.679266 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.717143 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.773373 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b7fa8168-5031-44e4-9e06-03b2cda941f5","Type":"ContainerStarted","Data":"4e01e660e75412bf1c9a61121fcc3f86002df35ca14e523698899bbd731d8c9f"} Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.855679 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5c4b86c744-rrhm8"] Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.857541 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:44 crc kubenswrapper[4852]: I0129 11:01:44.884669 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5c4b86c744-rrhm8"] Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.014735 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-public-tls-certs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.014783 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65f47530-2db1-46a2-84fa-dde28af57083-logs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.014814 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-combined-ca-bundle\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.014876 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-config-data\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.014925 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-scripts\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.014955 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcq82\" (UniqueName: \"kubernetes.io/projected/65f47530-2db1-46a2-84fa-dde28af57083-kube-api-access-hcq82\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.015032 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-internal-tls-certs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.116949 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcq82\" (UniqueName: \"kubernetes.io/projected/65f47530-2db1-46a2-84fa-dde28af57083-kube-api-access-hcq82\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.117190 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-internal-tls-certs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.117368 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-public-tls-certs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.117437 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65f47530-2db1-46a2-84fa-dde28af57083-logs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.117499 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-combined-ca-bundle\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.117640 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-config-data\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.117737 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-scripts\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.118012 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65f47530-2db1-46a2-84fa-dde28af57083-logs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.122790 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-scripts\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.125465 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-config-data\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.125680 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-public-tls-certs\") pod \"placement-5c4b86c744-rrhm8\" 
(UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.125740 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-internal-tls-certs\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.135261 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-combined-ca-bundle\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.135974 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcq82\" (UniqueName: \"kubernetes.io/projected/65f47530-2db1-46a2-84fa-dde28af57083-kube-api-access-hcq82\") pod \"placement-5c4b86c744-rrhm8\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.188294 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.477680 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ebae001-f26f-4cef-877e-6dcd5d132a4b" path="/var/lib/kubelet/pods/4ebae001-f26f-4cef-877e-6dcd5d132a4b/volumes" Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.662719 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5c4b86c744-rrhm8"] Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.793212 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c4b86c744-rrhm8" event={"ID":"65f47530-2db1-46a2-84fa-dde28af57083","Type":"ContainerStarted","Data":"8f16b9012519b79efc513e1f18d3b767fcd05df55aa6042bb6f2264563e53734"} Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.798684 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b7fa8168-5031-44e4-9e06-03b2cda941f5","Type":"ContainerStarted","Data":"fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11"} Jan 29 11:01:45 crc kubenswrapper[4852]: I0129 11:01:45.918631 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.809097 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c4b86c744-rrhm8" event={"ID":"65f47530-2db1-46a2-84fa-dde28af57083","Type":"ContainerStarted","Data":"c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc"} Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.809682 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.809697 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.809714 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c4b86c744-rrhm8" 
event={"ID":"65f47530-2db1-46a2-84fa-dde28af57083","Type":"ContainerStarted","Data":"6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9"} Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.811518 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b7fa8168-5031-44e4-9e06-03b2cda941f5","Type":"ContainerStarted","Data":"115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f"} Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.840316 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5c4b86c744-rrhm8" podStartSLOduration=2.840294326 podStartE2EDuration="2.840294326s" podCreationTimestamp="2026-01-29 11:01:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:46.832762661 +0000 UTC m=+1204.050093835" watchObservedRunningTime="2026-01-29 11:01:46.840294326 +0000 UTC m=+1204.057625460" Jan 29 11:01:46 crc kubenswrapper[4852]: I0129 11:01:46.858731 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.858713548 podStartE2EDuration="3.858713548s" podCreationTimestamp="2026-01-29 11:01:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:46.854862443 +0000 UTC m=+1204.072193587" watchObservedRunningTime="2026-01-29 11:01:46.858713548 +0000 UTC m=+1204.076044682" Jan 29 11:01:47 crc kubenswrapper[4852]: I0129 11:01:47.110716 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 11:01:48 crc kubenswrapper[4852]: I0129 11:01:48.440354 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:01:48 crc kubenswrapper[4852]: I0129 11:01:48.498161 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5d5c584546-w57m7"] Jan 29 11:01:48 crc kubenswrapper[4852]: I0129 11:01:48.498370 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5d5c584546-w57m7" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-api" containerID="cri-o://6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f" gracePeriod=30 Jan 29 11:01:48 crc kubenswrapper[4852]: I0129 11:01:48.499118 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5d5c584546-w57m7" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-httpd" containerID="cri-o://1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab" gracePeriod=30 Jan 29 11:01:48 crc kubenswrapper[4852]: I0129 11:01:48.852901 4852 generic.go:334] "Generic (PLEG): container finished" podID="822e4bb3-691c-486f-a043-5403174ee25c" containerID="1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab" exitCode=0 Jan 29 11:01:48 crc kubenswrapper[4852]: I0129 11:01:48.852952 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d5c584546-w57m7" event={"ID":"822e4bb3-691c-486f-a043-5403174ee25c","Type":"ContainerDied","Data":"1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab"} Jan 29 11:01:49 crc kubenswrapper[4852]: I0129 11:01:49.149984 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 11:01:51 
crc kubenswrapper[4852]: I0129 11:01:51.050224 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.052246 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.054207 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-wvhjc" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.056820 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.059956 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.060028 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.176956 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.177028 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config-secret\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.177076 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.177266 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72nfw\" (UniqueName: \"kubernetes.io/projected/20516ac5-463a-4d2c-a442-d74254876ddf-kube-api-access-72nfw\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.277965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72nfw\" (UniqueName: \"kubernetes.io/projected/20516ac5-463a-4d2c-a442-d74254876ddf-kube-api-access-72nfw\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.278053 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.278073 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config-secret\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.278093 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.278899 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.283877 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config-secret\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.296247 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.298120 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72nfw\" (UniqueName: \"kubernetes.io/projected/20516ac5-463a-4d2c-a442-d74254876ddf-kube-api-access-72nfw\") pod \"openstackclient\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.374024 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.811994 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7c548544bc-nwvzz"] Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.813685 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.820716 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.820768 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.821812 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.838849 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7c548544bc-nwvzz"] Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.869965 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.992551 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-run-httpd\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.992916 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78zt7\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-kube-api-access-78zt7\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.992996 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-etc-swift\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.993031 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-combined-ca-bundle\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.993047 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-log-httpd\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.993094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-config-data\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.993376 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-public-tls-certs\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:51 crc kubenswrapper[4852]: I0129 11:01:51.993406 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-internal-tls-certs\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094458 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-etc-swift\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094528 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-combined-ca-bundle\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094546 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-log-httpd\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094606 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-config-data\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094628 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-public-tls-certs\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094651 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-internal-tls-certs\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094689 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-run-httpd\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.094711 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78zt7\" (UniqueName: 
\"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-kube-api-access-78zt7\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.095720 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-log-httpd\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.095970 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-run-httpd\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.100516 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-etc-swift\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.102728 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-internal-tls-certs\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.103138 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-config-data\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.103246 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-combined-ca-bundle\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.104019 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-public-tls-certs\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.114907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78zt7\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-kube-api-access-78zt7\") pod \"swift-proxy-7c548544bc-nwvzz\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.144197 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.676036 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7c548544bc-nwvzz"] Jan 29 11:01:52 crc kubenswrapper[4852]: W0129 11:01:52.680057 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5704c357_5a1a_4a0b_9ce9_aa1e5c550c1e.slice/crio-9bd8b6960e80544de08980856dd9d13dda7069a61b4380365cdda0466cd063a6 WatchSource:0}: Error finding container 9bd8b6960e80544de08980856dd9d13dda7069a61b4380365cdda0466cd063a6: Status 404 returned error can't find the container with id 9bd8b6960e80544de08980856dd9d13dda7069a61b4380365cdda0466cd063a6 Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.915780 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"20516ac5-463a-4d2c-a442-d74254876ddf","Type":"ContainerStarted","Data":"64d65ecc7347d41002e70f40763ad11f097d63e35d433683aa217affd647c32c"} Jan 29 11:01:52 crc kubenswrapper[4852]: I0129 11:01:52.918608 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7c548544bc-nwvzz" event={"ID":"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e","Type":"ContainerStarted","Data":"9bd8b6960e80544de08980856dd9d13dda7069a61b4380365cdda0466cd063a6"} Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.594215 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.726005 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-config\") pod \"822e4bb3-691c-486f-a043-5403174ee25c\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.726060 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-combined-ca-bundle\") pod \"822e4bb3-691c-486f-a043-5403174ee25c\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.726105 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-ovndb-tls-certs\") pod \"822e4bb3-691c-486f-a043-5403174ee25c\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.726126 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-httpd-config\") pod \"822e4bb3-691c-486f-a043-5403174ee25c\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.726220 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nj26\" (UniqueName: \"kubernetes.io/projected/822e4bb3-691c-486f-a043-5403174ee25c-kube-api-access-5nj26\") pod \"822e4bb3-691c-486f-a043-5403174ee25c\" (UID: \"822e4bb3-691c-486f-a043-5403174ee25c\") " Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.738723 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/822e4bb3-691c-486f-a043-5403174ee25c-kube-api-access-5nj26" (OuterVolumeSpecName: "kube-api-access-5nj26") pod "822e4bb3-691c-486f-a043-5403174ee25c" (UID: "822e4bb3-691c-486f-a043-5403174ee25c"). InnerVolumeSpecName "kube-api-access-5nj26". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.755678 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "822e4bb3-691c-486f-a043-5403174ee25c" (UID: "822e4bb3-691c-486f-a043-5403174ee25c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.800047 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-config" (OuterVolumeSpecName: "config") pod "822e4bb3-691c-486f-a043-5403174ee25c" (UID: "822e4bb3-691c-486f-a043-5403174ee25c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.829080 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.829122 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.829137 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nj26\" (UniqueName: \"kubernetes.io/projected/822e4bb3-691c-486f-a043-5403174ee25c-kube-api-access-5nj26\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.833223 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "822e4bb3-691c-486f-a043-5403174ee25c" (UID: "822e4bb3-691c-486f-a043-5403174ee25c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.846756 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "822e4bb3-691c-486f-a043-5403174ee25c" (UID: "822e4bb3-691c-486f-a043-5403174ee25c"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.930494 4852 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.930535 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/822e4bb3-691c-486f-a043-5403174ee25c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.932658 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7c548544bc-nwvzz" event={"ID":"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e","Type":"ContainerStarted","Data":"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca"} Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.932708 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7c548544bc-nwvzz" event={"ID":"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e","Type":"ContainerStarted","Data":"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb"} Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.933918 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.933951 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.941062 4852 generic.go:334] "Generic (PLEG): container finished" podID="822e4bb3-691c-486f-a043-5403174ee25c" containerID="6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f" exitCode=0 Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.941112 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d5c584546-w57m7" event={"ID":"822e4bb3-691c-486f-a043-5403174ee25c","Type":"ContainerDied","Data":"6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f"} Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.941144 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d5c584546-w57m7" event={"ID":"822e4bb3-691c-486f-a043-5403174ee25c","Type":"ContainerDied","Data":"ef1265cf13d3e1a654b01a6832f881ee18608b3a2c06d460c335afd5e2aa3dc6"} Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.941167 4852 scope.go:117] "RemoveContainer" containerID="1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.941371 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5d5c584546-w57m7" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.956959 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7c548544bc-nwvzz" podStartSLOduration=2.956940227 podStartE2EDuration="2.956940227s" podCreationTimestamp="2026-01-29 11:01:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:01:53.954268501 +0000 UTC m=+1211.171599635" watchObservedRunningTime="2026-01-29 11:01:53.956940227 +0000 UTC m=+1211.174271361" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.983169 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5d5c584546-w57m7"] Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.989280 4852 scope.go:117] "RemoveContainer" containerID="6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f" Jan 29 11:01:53 crc kubenswrapper[4852]: I0129 11:01:53.991321 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5d5c584546-w57m7"] Jan 29 11:01:54 crc kubenswrapper[4852]: I0129 11:01:54.018261 4852 scope.go:117] "RemoveContainer" containerID="1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab" Jan 29 11:01:54 crc kubenswrapper[4852]: E0129 11:01:54.018675 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab\": container with ID starting with 1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab not found: ID does not exist" containerID="1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab" Jan 29 11:01:54 crc kubenswrapper[4852]: I0129 11:01:54.018711 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab"} err="failed to get container status \"1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab\": rpc error: code = NotFound desc = could not find container \"1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab\": container with ID starting with 1514f14771fbb378553572c01784e629454a2e3bb66d0a579c3ea1210d2dfcab not found: ID does not exist" Jan 29 11:01:54 crc kubenswrapper[4852]: I0129 11:01:54.018740 4852 scope.go:117] "RemoveContainer" containerID="6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f" Jan 29 11:01:54 crc kubenswrapper[4852]: E0129 11:01:54.019180 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f\": container with ID starting with 6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f not found: ID does not exist" containerID="6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f" Jan 29 11:01:54 crc kubenswrapper[4852]: I0129 11:01:54.019206 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f"} err="failed to get container status \"6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f\": rpc error: code = NotFound desc = could not find container \"6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f\": container with ID starting with 
6e893add3304d2ad217553950907c5fdbc5b10fceaf40507a9f0771488de2f1f not found: ID does not exist" Jan 29 11:01:54 crc kubenswrapper[4852]: I0129 11:01:54.430492 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.477474 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="822e4bb3-691c-486f-a043-5403174ee25c" path="/var/lib/kubelet/pods/822e4bb3-691c-486f-a043-5403174ee25c/volumes" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.817908 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.967994 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-scripts\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.968052 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-combined-ca-bundle\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.968152 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-log-httpd\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.968191 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-config-data\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.968263 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-sg-core-conf-yaml\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.968397 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-run-httpd\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.968451 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nhtx\" (UniqueName: \"kubernetes.io/projected/f988dca9-b8dd-406e-b316-d27052f43c80-kube-api-access-9nhtx\") pod \"f988dca9-b8dd-406e-b316-d27052f43c80\" (UID: \"f988dca9-b8dd-406e-b316-d27052f43c80\") " Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.970037 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.970049 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.970412 4852 generic.go:334] "Generic (PLEG): container finished" podID="f988dca9-b8dd-406e-b316-d27052f43c80" containerID="078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93" exitCode=137 Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.971424 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.972126 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerDied","Data":"078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93"} Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.972238 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f988dca9-b8dd-406e-b316-d27052f43c80","Type":"ContainerDied","Data":"e4028ccc4c654e6cf43e72ee673999be03e16430749cc61f57d452db9f477cee"} Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.972290 4852 scope.go:117] "RemoveContainer" containerID="078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.976262 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-scripts" (OuterVolumeSpecName: "scripts") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:55 crc kubenswrapper[4852]: I0129 11:01:55.979406 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f988dca9-b8dd-406e-b316-d27052f43c80-kube-api-access-9nhtx" (OuterVolumeSpecName: "kube-api-access-9nhtx") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "kube-api-access-9nhtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.014516 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.050226 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.060206 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-config-data" (OuterVolumeSpecName: "config-data") pod "f988dca9-b8dd-406e-b316-d27052f43c80" (UID: "f988dca9-b8dd-406e-b316-d27052f43c80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071678 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071715 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nhtx\" (UniqueName: \"kubernetes.io/projected/f988dca9-b8dd-406e-b316-d27052f43c80-kube-api-access-9nhtx\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071730 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071741 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071752 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f988dca9-b8dd-406e-b316-d27052f43c80-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071763 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.071774 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f988dca9-b8dd-406e-b316-d27052f43c80-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.080688 4852 scope.go:117] "RemoveContainer" containerID="966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.113842 4852 scope.go:117] "RemoveContainer" containerID="a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.132191 4852 scope.go:117] "RemoveContainer" containerID="b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.154287 4852 scope.go:117] "RemoveContainer" containerID="078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.163468 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93\": container with ID starting with 078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93 not found: ID does not exist" containerID="078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93" Jan 29 11:01:56 crc kubenswrapper[4852]: 
I0129 11:01:56.163501 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93"} err="failed to get container status \"078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93\": rpc error: code = NotFound desc = could not find container \"078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93\": container with ID starting with 078b224d9ce3219a2b41d0b328a8d1f755d6e1b1b7c6c191efc22ba867216c93 not found: ID does not exist" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.163524 4852 scope.go:117] "RemoveContainer" containerID="966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.163897 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7\": container with ID starting with 966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7 not found: ID does not exist" containerID="966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.163939 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7"} err="failed to get container status \"966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7\": rpc error: code = NotFound desc = could not find container \"966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7\": container with ID starting with 966d1506f494d641e2686d0c2588e0257234bcbec58879fea898f9da178855b7 not found: ID does not exist" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.163998 4852 scope.go:117] "RemoveContainer" containerID="a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.164527 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a\": container with ID starting with a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a not found: ID does not exist" containerID="a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.164550 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a"} err="failed to get container status \"a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a\": rpc error: code = NotFound desc = could not find container \"a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a\": container with ID starting with a2632b9f23d85e89b24f8733148871bd86efc3a59f73010dc4b413a694f1ac9a not found: ID does not exist" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.164566 4852 scope.go:117] "RemoveContainer" containerID="b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.164819 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf\": container with ID starting with 
b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf not found: ID does not exist" containerID="b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.164839 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf"} err="failed to get container status \"b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf\": rpc error: code = NotFound desc = could not find container \"b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf\": container with ID starting with b3301ec255ae34c79c9f8030f6865560465e269c554193db4e503f6a7847b2cf not found: ID does not exist" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.301997 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.312797 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.332663 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.333046 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="sg-core" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333068 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="sg-core" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.333092 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-notification-agent" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333098 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-notification-agent" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.333121 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="proxy-httpd" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333128 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="proxy-httpd" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.333145 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-central-agent" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333153 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-central-agent" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.333165 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-httpd" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333172 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-httpd" Jan 29 11:01:56 crc kubenswrapper[4852]: E0129 11:01:56.333185 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-api" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333191 4852 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-api" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333358 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-api" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333371 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-notification-agent" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333380 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="sg-core" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333392 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="822e4bb3-691c-486f-a043-5403174ee25c" containerName="neutron-httpd" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333408 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="ceilometer-central-agent" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.333420 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" containerName="proxy-httpd" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.334933 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.337849 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.338001 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.354074 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-log-httpd\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479359 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dbmv\" (UniqueName: \"kubernetes.io/projected/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-kube-api-access-8dbmv\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479385 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-run-httpd\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479531 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-config-data\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479612 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-scripts\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479635 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.479701 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581288 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-config-data\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581345 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-scripts\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581369 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581405 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581494 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-log-httpd\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581519 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dbmv\" (UniqueName: \"kubernetes.io/projected/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-kube-api-access-8dbmv\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.581538 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-run-httpd\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 
11:01:56.581997 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-run-httpd\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.582290 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-log-httpd\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.586208 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-config-data\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.589622 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-scripts\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.597184 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.597748 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.600705 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dbmv\" (UniqueName: \"kubernetes.io/projected/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-kube-api-access-8dbmv\") pod \"ceilometer-0\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " pod="openstack/ceilometer-0" Jan 29 11:01:56 crc kubenswrapper[4852]: I0129 11:01:56.659475 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:01:57 crc kubenswrapper[4852]: I0129 11:01:57.121434 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:01:57 crc kubenswrapper[4852]: I0129 11:01:57.162695 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:01:57 crc kubenswrapper[4852]: I0129 11:01:57.477180 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f988dca9-b8dd-406e-b316-d27052f43c80" path="/var/lib/kubelet/pods/f988dca9-b8dd-406e-b316-d27052f43c80/volumes" Jan 29 11:02:01 crc kubenswrapper[4852]: I0129 11:02:01.679015 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:02 crc kubenswrapper[4852]: I0129 11:02:02.154289 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:02:02 crc kubenswrapper[4852]: W0129 11:02:02.273068 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe4b2847_5c03_4484_9df5_a3c65a5ffdfe.slice/crio-b83eff1cdcf5ac24d46373ad782ff1185c48add1f7f35beeeda9de5662e7f601 WatchSource:0}: Error finding container b83eff1cdcf5ac24d46373ad782ff1185c48add1f7f35beeeda9de5662e7f601: Status 404 returned error can't find the container with id b83eff1cdcf5ac24d46373ad782ff1185c48add1f7f35beeeda9de5662e7f601 Jan 29 11:02:03 crc kubenswrapper[4852]: I0129 11:02:03.405054 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"20516ac5-463a-4d2c-a442-d74254876ddf","Type":"ContainerStarted","Data":"9e2fbd4b9daaa437f8f321cc9702964e5924353cd1b5d954ec556bcd5e7b8cfd"} Jan 29 11:02:03 crc kubenswrapper[4852]: I0129 11:02:03.421189 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerStarted","Data":"b83eff1cdcf5ac24d46373ad782ff1185c48add1f7f35beeeda9de5662e7f601"} Jan 29 11:02:03 crc kubenswrapper[4852]: I0129 11:02:03.439375 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.955293956 podStartE2EDuration="12.439350886s" podCreationTimestamp="2026-01-29 11:01:51 +0000 UTC" firstStartedPulling="2026-01-29 11:01:51.87932159 +0000 UTC m=+1209.096652724" lastFinishedPulling="2026-01-29 11:02:02.36337852 +0000 UTC m=+1219.580709654" observedRunningTime="2026-01-29 11:02:03.427998907 +0000 UTC m=+1220.645330051" watchObservedRunningTime="2026-01-29 11:02:03.439350886 +0000 UTC m=+1220.656682020" Jan 29 11:02:04 crc kubenswrapper[4852]: I0129 11:02:04.433252 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerStarted","Data":"ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051"} Jan 29 11:02:04 crc kubenswrapper[4852]: I0129 11:02:04.433788 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerStarted","Data":"5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c"} Jan 29 11:02:05 crc kubenswrapper[4852]: I0129 11:02:05.479977 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerStarted","Data":"45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03"} Jan 29 11:02:08 crc kubenswrapper[4852]: I0129 11:02:08.501695 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerStarted","Data":"fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff"} Jan 29 11:02:08 crc kubenswrapper[4852]: I0129 11:02:08.502343 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-central-agent" containerID="cri-o://5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c" gracePeriod=30 Jan 29 11:02:08 crc kubenswrapper[4852]: I0129 11:02:08.502604 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 11:02:08 crc kubenswrapper[4852]: I0129 11:02:08.502839 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="proxy-httpd" containerID="cri-o://fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff" gracePeriod=30 Jan 29 11:02:08 crc kubenswrapper[4852]: I0129 11:02:08.502883 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="sg-core" containerID="cri-o://45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03" gracePeriod=30 Jan 29 11:02:08 crc kubenswrapper[4852]: I0129 11:02:08.502916 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-notification-agent" containerID="cri-o://ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051" gracePeriod=30 Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.513554 4852 generic.go:334] "Generic (PLEG): container finished" podID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerID="fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff" exitCode=0 Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.513823 4852 generic.go:334] "Generic (PLEG): container finished" podID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerID="45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03" exitCode=2 Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.513832 4852 generic.go:334] "Generic (PLEG): container finished" podID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerID="ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051" exitCode=0 Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.513628 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerDied","Data":"fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff"} Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.513870 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerDied","Data":"45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03"} Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.513885 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerDied","Data":"ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051"} Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.690804 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=8.664919912 podStartE2EDuration="13.690785643s" podCreationTimestamp="2026-01-29 11:01:56 +0000 UTC" firstStartedPulling="2026-01-29 11:02:02.275973175 +0000 UTC m=+1219.493304309" lastFinishedPulling="2026-01-29 11:02:07.301838916 +0000 UTC m=+1224.519170040" observedRunningTime="2026-01-29 11:02:08.53585431 +0000 UTC m=+1225.753185524" watchObservedRunningTime="2026-01-29 11:02:09.690785643 +0000 UTC m=+1226.908116777" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.691464 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-hjt4q"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.692711 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.699319 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hjt4q"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.798436 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-rhfbl"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.799905 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.808200 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-rhfbl"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.847011 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r57l7\" (UniqueName: \"kubernetes.io/projected/e8eaa1c2-1b96-43b1-ba67-522014312ee6-kube-api-access-r57l7\") pod \"nova-api-db-create-hjt4q\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.847605 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8eaa1c2-1b96-43b1-ba67-522014312ee6-operator-scripts\") pod \"nova-api-db-create-hjt4q\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.908638 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-rnnzv"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.909785 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.913367 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c0a7-account-create-update-rbbpm"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.930360 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-rnnzv"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.931950 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c0a7-account-create-update-rbbpm"] Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.932249 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.939141 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.949016 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8eaa1c2-1b96-43b1-ba67-522014312ee6-operator-scripts\") pod \"nova-api-db-create-hjt4q\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.949074 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5dps\" (UniqueName: \"kubernetes.io/projected/8071177d-5f88-4cda-a3a8-b36eb0807a64-kube-api-access-z5dps\") pod \"nova-cell0-db-create-rhfbl\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.949104 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8071177d-5f88-4cda-a3a8-b36eb0807a64-operator-scripts\") pod \"nova-cell0-db-create-rhfbl\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.949179 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r57l7\" (UniqueName: \"kubernetes.io/projected/e8eaa1c2-1b96-43b1-ba67-522014312ee6-kube-api-access-r57l7\") pod \"nova-api-db-create-hjt4q\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.950101 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8eaa1c2-1b96-43b1-ba67-522014312ee6-operator-scripts\") pod \"nova-api-db-create-hjt4q\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:09 crc kubenswrapper[4852]: I0129 11:02:09.973542 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r57l7\" (UniqueName: \"kubernetes.io/projected/e8eaa1c2-1b96-43b1-ba67-522014312ee6-kube-api-access-r57l7\") pod \"nova-api-db-create-hjt4q\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.051562 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5dps\" (UniqueName: \"kubernetes.io/projected/8071177d-5f88-4cda-a3a8-b36eb0807a64-kube-api-access-z5dps\") pod \"nova-cell0-db-create-rhfbl\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.051632 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8071177d-5f88-4cda-a3a8-b36eb0807a64-operator-scripts\") pod \"nova-cell0-db-create-rhfbl\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.051790 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075426ec-6034-40e1-9449-bfec69b8e991-operator-scripts\") pod \"nova-cell1-db-create-rnnzv\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.051893 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbs7v\" (UniqueName: \"kubernetes.io/projected/0e845226-3c75-48d0-9fbc-a22e885dac4d-kube-api-access-zbs7v\") pod \"nova-api-c0a7-account-create-update-rbbpm\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.052078 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqkd2\" (UniqueName: \"kubernetes.io/projected/075426ec-6034-40e1-9449-bfec69b8e991-kube-api-access-qqkd2\") pod \"nova-cell1-db-create-rnnzv\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.052239 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e845226-3c75-48d0-9fbc-a22e885dac4d-operator-scripts\") pod \"nova-api-c0a7-account-create-update-rbbpm\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.052372 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8071177d-5f88-4cda-a3a8-b36eb0807a64-operator-scripts\") pod \"nova-cell0-db-create-rhfbl\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.069541 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.072342 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5dps\" (UniqueName: \"kubernetes.io/projected/8071177d-5f88-4cda-a3a8-b36eb0807a64-kube-api-access-z5dps\") pod \"nova-cell0-db-create-rhfbl\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.102994 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f82e-account-create-update-86cb7"] Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.104445 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.107574 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.125039 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.139051 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f82e-account-create-update-86cb7"] Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.153710 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e845226-3c75-48d0-9fbc-a22e885dac4d-operator-scripts\") pod \"nova-api-c0a7-account-create-update-rbbpm\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.153766 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6ctf\" (UniqueName: \"kubernetes.io/projected/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-kube-api-access-b6ctf\") pod \"nova-cell0-f82e-account-create-update-86cb7\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.153803 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-operator-scripts\") pod \"nova-cell0-f82e-account-create-update-86cb7\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.153885 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075426ec-6034-40e1-9449-bfec69b8e991-operator-scripts\") pod \"nova-cell1-db-create-rnnzv\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.153917 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbs7v\" (UniqueName: \"kubernetes.io/projected/0e845226-3c75-48d0-9fbc-a22e885dac4d-kube-api-access-zbs7v\") pod \"nova-api-c0a7-account-create-update-rbbpm\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.153965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqkd2\" (UniqueName: \"kubernetes.io/projected/075426ec-6034-40e1-9449-bfec69b8e991-kube-api-access-qqkd2\") pod \"nova-cell1-db-create-rnnzv\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.155099 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075426ec-6034-40e1-9449-bfec69b8e991-operator-scripts\") pod \"nova-cell1-db-create-rnnzv\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.169021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e845226-3c75-48d0-9fbc-a22e885dac4d-operator-scripts\") pod \"nova-api-c0a7-account-create-update-rbbpm\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " 
pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.169902 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbs7v\" (UniqueName: \"kubernetes.io/projected/0e845226-3c75-48d0-9fbc-a22e885dac4d-kube-api-access-zbs7v\") pod \"nova-api-c0a7-account-create-update-rbbpm\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.185270 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqkd2\" (UniqueName: \"kubernetes.io/projected/075426ec-6034-40e1-9449-bfec69b8e991-kube-api-access-qqkd2\") pod \"nova-cell1-db-create-rnnzv\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.242685 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.256143 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6ctf\" (UniqueName: \"kubernetes.io/projected/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-kube-api-access-b6ctf\") pod \"nova-cell0-f82e-account-create-update-86cb7\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.256188 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-operator-scripts\") pod \"nova-cell0-f82e-account-create-update-86cb7\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.257189 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-operator-scripts\") pod \"nova-cell0-f82e-account-create-update-86cb7\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.262923 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.287382 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6ctf\" (UniqueName: \"kubernetes.io/projected/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-kube-api-access-b6ctf\") pod \"nova-cell0-f82e-account-create-update-86cb7\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.296738 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.312725 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-5040-account-create-update-cmfc4"] Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.314023 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.317330 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.325689 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-cmfc4"] Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.362623 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw9jr\" (UniqueName: \"kubernetes.io/projected/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-kube-api-access-bw9jr\") pod \"nova-cell1-5040-account-create-update-cmfc4\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.362754 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-operator-scripts\") pod \"nova-cell1-5040-account-create-update-cmfc4\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.464719 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-operator-scripts\") pod \"nova-cell1-5040-account-create-update-cmfc4\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.465160 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw9jr\" (UniqueName: \"kubernetes.io/projected/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-kube-api-access-bw9jr\") pod \"nova-cell1-5040-account-create-update-cmfc4\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.466036 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-operator-scripts\") pod \"nova-cell1-5040-account-create-update-cmfc4\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.488104 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw9jr\" (UniqueName: \"kubernetes.io/projected/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-kube-api-access-bw9jr\") pod \"nova-cell1-5040-account-create-update-cmfc4\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.639778 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.666574 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hjt4q"] Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.725001 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-rhfbl"] Jan 29 11:02:10 crc kubenswrapper[4852]: I0129 11:02:10.962630 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c0a7-account-create-update-rbbpm"] Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.036112 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-rnnzv"] Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.133237 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f82e-account-create-update-86cb7"] Jan 29 11:02:11 crc kubenswrapper[4852]: W0129 11:02:11.146596 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7cb1fa8_606f_44bb_9b88_85bfbf76ba22.slice/crio-5195929cb44538a9c0408bcc7b41301dc83ac5f8ef4151dbcc10860f6056cfda WatchSource:0}: Error finding container 5195929cb44538a9c0408bcc7b41301dc83ac5f8ef4151dbcc10860f6056cfda: Status 404 returned error can't find the container with id 5195929cb44538a9c0408bcc7b41301dc83ac5f8ef4151dbcc10860f6056cfda Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.253851 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-cmfc4"] Jan 29 11:02:11 crc kubenswrapper[4852]: W0129 11:02:11.269947 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5640101e_33ca_4d7f_a5db_2ddd5f04cb03.slice/crio-74d035f487c8fcc34216aea616d58b78a935278d72497c7e89b01fa5d7bacc97 WatchSource:0}: Error finding container 74d035f487c8fcc34216aea616d58b78a935278d72497c7e89b01fa5d7bacc97: Status 404 returned error can't find the container with id 74d035f487c8fcc34216aea616d58b78a935278d72497c7e89b01fa5d7bacc97 Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.554892 4852 generic.go:334] "Generic (PLEG): container finished" podID="8071177d-5f88-4cda-a3a8-b36eb0807a64" containerID="1316e66541931581dfec528c6c6d4c09ab594c33c249d5a27bc94106e0e932b3" exitCode=0 Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.555256 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rhfbl" event={"ID":"8071177d-5f88-4cda-a3a8-b36eb0807a64","Type":"ContainerDied","Data":"1316e66541931581dfec528c6c6d4c09ab594c33c249d5a27bc94106e0e932b3"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.555287 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rhfbl" event={"ID":"8071177d-5f88-4cda-a3a8-b36eb0807a64","Type":"ContainerStarted","Data":"24f8c5366d5331dc03a85b46df381011cbd2d65970e35d0b5076b1c132528a6b"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.557874 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" event={"ID":"0e845226-3c75-48d0-9fbc-a22e885dac4d","Type":"ContainerStarted","Data":"0c1cc19179c9804cc80fd8b48b39876977813981c876f20d3c7e8de9ed98b401"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.557909 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-c0a7-account-create-update-rbbpm" event={"ID":"0e845226-3c75-48d0-9fbc-a22e885dac4d","Type":"ContainerStarted","Data":"529266eeb18cb2c88e25bcbedee4f43f0b6e1ca1f5d901ff39f45acdf3ada547"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.560290 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-rnnzv" event={"ID":"075426ec-6034-40e1-9449-bfec69b8e991","Type":"ContainerStarted","Data":"009d119591790d6a0f11735914b575b835e0543c13cfb09fee49e86d5dc2918f"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.562545 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" event={"ID":"5640101e-33ca-4d7f-a5db-2ddd5f04cb03","Type":"ContainerStarted","Data":"74d035f487c8fcc34216aea616d58b78a935278d72497c7e89b01fa5d7bacc97"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.564221 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" event={"ID":"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22","Type":"ContainerStarted","Data":"5195929cb44538a9c0408bcc7b41301dc83ac5f8ef4151dbcc10860f6056cfda"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.565566 4852 generic.go:334] "Generic (PLEG): container finished" podID="e8eaa1c2-1b96-43b1-ba67-522014312ee6" containerID="08de13d6a892eddfd8ef7e195228d8f6b0c049491e81bb2ad93d8fa4eb38e23c" exitCode=0 Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.565740 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hjt4q" event={"ID":"e8eaa1c2-1b96-43b1-ba67-522014312ee6","Type":"ContainerDied","Data":"08de13d6a892eddfd8ef7e195228d8f6b0c049491e81bb2ad93d8fa4eb38e23c"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.566228 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hjt4q" event={"ID":"e8eaa1c2-1b96-43b1-ba67-522014312ee6","Type":"ContainerStarted","Data":"f286c88a9d4eb34dba7147ae00894d6eb3d4484594cb77dc354132bfafcc666f"} Jan 29 11:02:11 crc kubenswrapper[4852]: I0129 11:02:11.615700 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" podStartSLOduration=2.6156788730000002 podStartE2EDuration="2.615678873s" podCreationTimestamp="2026-01-29 11:02:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:11.60662647 +0000 UTC m=+1228.823957624" watchObservedRunningTime="2026-01-29 11:02:11.615678873 +0000 UTC m=+1228.833010007" Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.578047 4852 generic.go:334] "Generic (PLEG): container finished" podID="e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" containerID="56adf8995d6d9f105e72928c811318e2027d2ada0d9e9d4dbc0b3cd7738924ab" exitCode=0 Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.578418 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" event={"ID":"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22","Type":"ContainerDied","Data":"56adf8995d6d9f105e72928c811318e2027d2ada0d9e9d4dbc0b3cd7738924ab"} Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.580923 4852 generic.go:334] "Generic (PLEG): container finished" podID="0e845226-3c75-48d0-9fbc-a22e885dac4d" containerID="0c1cc19179c9804cc80fd8b48b39876977813981c876f20d3c7e8de9ed98b401" exitCode=0 Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.581023 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" event={"ID":"0e845226-3c75-48d0-9fbc-a22e885dac4d","Type":"ContainerDied","Data":"0c1cc19179c9804cc80fd8b48b39876977813981c876f20d3c7e8de9ed98b401"} Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.582460 4852 generic.go:334] "Generic (PLEG): container finished" podID="075426ec-6034-40e1-9449-bfec69b8e991" containerID="da9aa472fe83d92b938cf2bd579ba1249170e8f5d51d10d02034d707ea308d31" exitCode=0 Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.582505 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-rnnzv" event={"ID":"075426ec-6034-40e1-9449-bfec69b8e991","Type":"ContainerDied","Data":"da9aa472fe83d92b938cf2bd579ba1249170e8f5d51d10d02034d707ea308d31"} Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.583724 4852 generic.go:334] "Generic (PLEG): container finished" podID="5640101e-33ca-4d7f-a5db-2ddd5f04cb03" containerID="153dabdfcf20ad54b10959d9a6c480c6fa8781e5c6362e50144c73daff82f857" exitCode=0 Jan 29 11:02:12 crc kubenswrapper[4852]: I0129 11:02:12.583872 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" event={"ID":"5640101e-33ca-4d7f-a5db-2ddd5f04cb03","Type":"ContainerDied","Data":"153dabdfcf20ad54b10959d9a6c480c6fa8781e5c6362e50144c73daff82f857"} Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.094349 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.215923 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8eaa1c2-1b96-43b1-ba67-522014312ee6-operator-scripts\") pod \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.216160 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r57l7\" (UniqueName: \"kubernetes.io/projected/e8eaa1c2-1b96-43b1-ba67-522014312ee6-kube-api-access-r57l7\") pod \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\" (UID: \"e8eaa1c2-1b96-43b1-ba67-522014312ee6\") " Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.216657 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8eaa1c2-1b96-43b1-ba67-522014312ee6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8eaa1c2-1b96-43b1-ba67-522014312ee6" (UID: "e8eaa1c2-1b96-43b1-ba67-522014312ee6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.227226 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.227814 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8eaa1c2-1b96-43b1-ba67-522014312ee6-kube-api-access-r57l7" (OuterVolumeSpecName: "kube-api-access-r57l7") pod "e8eaa1c2-1b96-43b1-ba67-522014312ee6" (UID: "e8eaa1c2-1b96-43b1-ba67-522014312ee6"). InnerVolumeSpecName "kube-api-access-r57l7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.318729 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r57l7\" (UniqueName: \"kubernetes.io/projected/e8eaa1c2-1b96-43b1-ba67-522014312ee6-kube-api-access-r57l7\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.318777 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8eaa1c2-1b96-43b1-ba67-522014312ee6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.419739 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5dps\" (UniqueName: \"kubernetes.io/projected/8071177d-5f88-4cda-a3a8-b36eb0807a64-kube-api-access-z5dps\") pod \"8071177d-5f88-4cda-a3a8-b36eb0807a64\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.419786 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8071177d-5f88-4cda-a3a8-b36eb0807a64-operator-scripts\") pod \"8071177d-5f88-4cda-a3a8-b36eb0807a64\" (UID: \"8071177d-5f88-4cda-a3a8-b36eb0807a64\") " Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.420361 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8071177d-5f88-4cda-a3a8-b36eb0807a64-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8071177d-5f88-4cda-a3a8-b36eb0807a64" (UID: "8071177d-5f88-4cda-a3a8-b36eb0807a64"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.423094 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8071177d-5f88-4cda-a3a8-b36eb0807a64-kube-api-access-z5dps" (OuterVolumeSpecName: "kube-api-access-z5dps") pod "8071177d-5f88-4cda-a3a8-b36eb0807a64" (UID: "8071177d-5f88-4cda-a3a8-b36eb0807a64"). InnerVolumeSpecName "kube-api-access-z5dps". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.522403 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5dps\" (UniqueName: \"kubernetes.io/projected/8071177d-5f88-4cda-a3a8-b36eb0807a64-kube-api-access-z5dps\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.522433 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8071177d-5f88-4cda-a3a8-b36eb0807a64-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.592669 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hjt4q" event={"ID":"e8eaa1c2-1b96-43b1-ba67-522014312ee6","Type":"ContainerDied","Data":"f286c88a9d4eb34dba7147ae00894d6eb3d4484594cb77dc354132bfafcc666f"} Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.592716 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f286c88a9d4eb34dba7147ae00894d6eb3d4484594cb77dc354132bfafcc666f" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.592690 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-hjt4q" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.594249 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rhfbl" event={"ID":"8071177d-5f88-4cda-a3a8-b36eb0807a64","Type":"ContainerDied","Data":"24f8c5366d5331dc03a85b46df381011cbd2d65970e35d0b5076b1c132528a6b"} Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.594286 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24f8c5366d5331dc03a85b46df381011cbd2d65970e35d0b5076b1c132528a6b" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.594417 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rhfbl" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.894348 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.931908 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-operator-scripts\") pod \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.932104 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw9jr\" (UniqueName: \"kubernetes.io/projected/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-kube-api-access-bw9jr\") pod \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\" (UID: \"5640101e-33ca-4d7f-a5db-2ddd5f04cb03\") " Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.932938 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5640101e-33ca-4d7f-a5db-2ddd5f04cb03" (UID: "5640101e-33ca-4d7f-a5db-2ddd5f04cb03"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:02:13 crc kubenswrapper[4852]: I0129 11:02:13.939750 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-kube-api-access-bw9jr" (OuterVolumeSpecName: "kube-api-access-bw9jr") pod "5640101e-33ca-4d7f-a5db-2ddd5f04cb03" (UID: "5640101e-33ca-4d7f-a5db-2ddd5f04cb03"). InnerVolumeSpecName "kube-api-access-bw9jr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.045499 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.045546 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw9jr\" (UniqueName: \"kubernetes.io/projected/5640101e-33ca-4d7f-a5db-2ddd5f04cb03-kube-api-access-bw9jr\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.161552 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.169693 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.178167 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247325 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqkd2\" (UniqueName: \"kubernetes.io/projected/075426ec-6034-40e1-9449-bfec69b8e991-kube-api-access-qqkd2\") pod \"075426ec-6034-40e1-9449-bfec69b8e991\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247375 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6ctf\" (UniqueName: \"kubernetes.io/projected/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-kube-api-access-b6ctf\") pod \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247434 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e845226-3c75-48d0-9fbc-a22e885dac4d-operator-scripts\") pod \"0e845226-3c75-48d0-9fbc-a22e885dac4d\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247465 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbs7v\" (UniqueName: \"kubernetes.io/projected/0e845226-3c75-48d0-9fbc-a22e885dac4d-kube-api-access-zbs7v\") pod \"0e845226-3c75-48d0-9fbc-a22e885dac4d\" (UID: \"0e845226-3c75-48d0-9fbc-a22e885dac4d\") " Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247496 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075426ec-6034-40e1-9449-bfec69b8e991-operator-scripts\") pod \"075426ec-6034-40e1-9449-bfec69b8e991\" (UID: \"075426ec-6034-40e1-9449-bfec69b8e991\") " Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247534 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-operator-scripts\") pod \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\" (UID: \"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22\") " Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.247993 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/075426ec-6034-40e1-9449-bfec69b8e991-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "075426ec-6034-40e1-9449-bfec69b8e991" (UID: "075426ec-6034-40e1-9449-bfec69b8e991"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.248136 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e845226-3c75-48d0-9fbc-a22e885dac4d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e845226-3c75-48d0-9fbc-a22e885dac4d" (UID: "0e845226-3c75-48d0-9fbc-a22e885dac4d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.251979 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" (UID: "e7cb1fa8-606f-44bb-9b88-85bfbf76ba22"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.252992 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/075426ec-6034-40e1-9449-bfec69b8e991-kube-api-access-qqkd2" (OuterVolumeSpecName: "kube-api-access-qqkd2") pod "075426ec-6034-40e1-9449-bfec69b8e991" (UID: "075426ec-6034-40e1-9449-bfec69b8e991"). InnerVolumeSpecName "kube-api-access-qqkd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.266940 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-kube-api-access-b6ctf" (OuterVolumeSpecName: "kube-api-access-b6ctf") pod "e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" (UID: "e7cb1fa8-606f-44bb-9b88-85bfbf76ba22"). InnerVolumeSpecName "kube-api-access-b6ctf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.270209 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e845226-3c75-48d0-9fbc-a22e885dac4d-kube-api-access-zbs7v" (OuterVolumeSpecName: "kube-api-access-zbs7v") pod "0e845226-3c75-48d0-9fbc-a22e885dac4d" (UID: "0e845226-3c75-48d0-9fbc-a22e885dac4d"). InnerVolumeSpecName "kube-api-access-zbs7v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.348673 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbs7v\" (UniqueName: \"kubernetes.io/projected/0e845226-3c75-48d0-9fbc-a22e885dac4d-kube-api-access-zbs7v\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.348711 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/075426ec-6034-40e1-9449-bfec69b8e991-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.348723 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.348734 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqkd2\" (UniqueName: \"kubernetes.io/projected/075426ec-6034-40e1-9449-bfec69b8e991-kube-api-access-qqkd2\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.348745 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6ctf\" (UniqueName: \"kubernetes.io/projected/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22-kube-api-access-b6ctf\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.348756 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e845226-3c75-48d0-9fbc-a22e885dac4d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.604398 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.607632 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f82e-account-create-update-86cb7" event={"ID":"e7cb1fa8-606f-44bb-9b88-85bfbf76ba22","Type":"ContainerDied","Data":"5195929cb44538a9c0408bcc7b41301dc83ac5f8ef4151dbcc10860f6056cfda"} Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.607667 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5195929cb44538a9c0408bcc7b41301dc83ac5f8ef4151dbcc10860f6056cfda" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.609354 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" event={"ID":"0e845226-3c75-48d0-9fbc-a22e885dac4d","Type":"ContainerDied","Data":"529266eeb18cb2c88e25bcbedee4f43f0b6e1ca1f5d901ff39f45acdf3ada547"} Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.609394 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="529266eeb18cb2c88e25bcbedee4f43f0b6e1ca1f5d901ff39f45acdf3ada547" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.609449 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c0a7-account-create-update-rbbpm" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.613052 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-rnnzv" event={"ID":"075426ec-6034-40e1-9449-bfec69b8e991","Type":"ContainerDied","Data":"009d119591790d6a0f11735914b575b835e0543c13cfb09fee49e86d5dc2918f"} Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.613378 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="009d119591790d6a0f11735914b575b835e0543c13cfb09fee49e86d5dc2918f" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.613125 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-rnnzv" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.615252 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.615251 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5040-account-create-update-cmfc4" event={"ID":"5640101e-33ca-4d7f-a5db-2ddd5f04cb03","Type":"ContainerDied","Data":"74d035f487c8fcc34216aea616d58b78a935278d72497c7e89b01fa5d7bacc97"} Jan 29 11:02:14 crc kubenswrapper[4852]: I0129 11:02:14.615335 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74d035f487c8fcc34216aea616d58b78a935278d72497c7e89b01fa5d7bacc97" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.257186 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364536 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-scripts\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364655 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dbmv\" (UniqueName: \"kubernetes.io/projected/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-kube-api-access-8dbmv\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364787 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-combined-ca-bundle\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364819 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-sg-core-conf-yaml\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364872 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-run-httpd\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364924 
4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-log-httpd\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.364953 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-config-data\") pod \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\" (UID: \"be4b2847-5c03-4484-9df5-a3c65a5ffdfe\") " Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.365288 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.365614 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.365974 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.369686 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-scripts" (OuterVolumeSpecName: "scripts") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.369715 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-kube-api-access-8dbmv" (OuterVolumeSpecName: "kube-api-access-8dbmv") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "kube-api-access-8dbmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.402011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.468041 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.468083 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dbmv\" (UniqueName: \"kubernetes.io/projected/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-kube-api-access-8dbmv\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.468097 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.468111 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.471406 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.503251 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-config-data" (OuterVolumeSpecName: "config-data") pod "be4b2847-5c03-4484-9df5-a3c65a5ffdfe" (UID: "be4b2847-5c03-4484-9df5-a3c65a5ffdfe"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.569378 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.569731 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4b2847-5c03-4484-9df5-a3c65a5ffdfe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.625187 4852 generic.go:334] "Generic (PLEG): container finished" podID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerID="5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c" exitCode=0 Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.625231 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerDied","Data":"5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c"} Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.625262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4b2847-5c03-4484-9df5-a3c65a5ffdfe","Type":"ContainerDied","Data":"b83eff1cdcf5ac24d46373ad782ff1185c48add1f7f35beeeda9de5662e7f601"} Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.625280 4852 scope.go:117] "RemoveContainer" containerID="fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.625284 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.661451 4852 scope.go:117] "RemoveContainer" containerID="45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.695667 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.697012 4852 scope.go:117] "RemoveContainer" containerID="ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.718322 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.724545 4852 scope.go:117] "RemoveContainer" containerID="5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756202 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756818 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756842 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756855 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="sg-core" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756863 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" 
containerName="sg-core" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756877 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-notification-agent" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756885 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-notification-agent" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756898 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8eaa1c2-1b96-43b1-ba67-522014312ee6" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756906 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8eaa1c2-1b96-43b1-ba67-522014312ee6" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756918 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075426ec-6034-40e1-9449-bfec69b8e991" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756925 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="075426ec-6034-40e1-9449-bfec69b8e991" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756946 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="proxy-httpd" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756953 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="proxy-httpd" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756968 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5640101e-33ca-4d7f-a5db-2ddd5f04cb03" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.756976 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="5640101e-33ca-4d7f-a5db-2ddd5f04cb03" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.756994 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e845226-3c75-48d0-9fbc-a22e885dac4d" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.757001 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e845226-3c75-48d0-9fbc-a22e885dac4d" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.757025 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-central-agent" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.757034 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-central-agent" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.757049 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8071177d-5f88-4cda-a3a8-b36eb0807a64" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.757057 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8071177d-5f88-4cda-a3a8-b36eb0807a64" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.757267 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-central-agent" 
Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.757290 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="ceilometer-notification-agent" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759172 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759192 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e845226-3c75-48d0-9fbc-a22e885dac4d" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759203 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8eaa1c2-1b96-43b1-ba67-522014312ee6" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759213 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="sg-core" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759225 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="5640101e-33ca-4d7f-a5db-2ddd5f04cb03" containerName="mariadb-account-create-update" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759239 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" containerName="proxy-httpd" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759251 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8071177d-5f88-4cda-a3a8-b36eb0807a64" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.759264 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="075426ec-6034-40e1-9449-bfec69b8e991" containerName="mariadb-database-create" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.763618 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.769827 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.771732 4852 scope.go:117] "RemoveContainer" containerID="fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.771905 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.771924 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.777881 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff\": container with ID starting with fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff not found: ID does not exist" containerID="fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.777956 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff"} err="failed to get container status \"fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff\": rpc error: code = NotFound desc = could not find container \"fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff\": container with ID starting with fcef47a590844593a1b47f4cd28f4b852069f4a1ecee59d8472dc20fe9b18cff not found: ID does not exist" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.778013 4852 scope.go:117] "RemoveContainer" containerID="45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.779835 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03\": container with ID starting with 45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03 not found: ID does not exist" containerID="45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.779875 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03"} err="failed to get container status \"45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03\": rpc error: code = NotFound desc = could not find container \"45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03\": container with ID starting with 45e1f81f98caf772ba797e3a2b10fb4a694d353adade3153b0dceb7a0c220d03 not found: ID does not exist" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.779902 4852 scope.go:117] "RemoveContainer" containerID="ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.780260 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051\": container with ID starting with ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051 not found: ID 
does not exist" containerID="ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.780292 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051"} err="failed to get container status \"ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051\": rpc error: code = NotFound desc = could not find container \"ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051\": container with ID starting with ffe1d21acaee87ec2b5ea0f4298275591bfd4c96ce4f0136fd04bc6120f3c051 not found: ID does not exist" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.780317 4852 scope.go:117] "RemoveContainer" containerID="5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c" Jan 29 11:02:15 crc kubenswrapper[4852]: E0129 11:02:15.780658 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c\": container with ID starting with 5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c not found: ID does not exist" containerID="5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.780684 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c"} err="failed to get container status \"5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c\": rpc error: code = NotFound desc = could not find container \"5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c\": container with ID starting with 5a79d41f016b2604072481ccde3ed5ad041bc40fdf1437f115c19ac710d97a4c not found: ID does not exist" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.875782 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.875836 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7lqr\" (UniqueName: \"kubernetes.io/projected/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-kube-api-access-c7lqr\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.875864 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.875984 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-run-httpd\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.876024 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-log-httpd\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.876078 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-config-data\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.876173 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-scripts\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977454 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977500 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7lqr\" (UniqueName: \"kubernetes.io/projected/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-kube-api-access-c7lqr\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977528 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977664 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-run-httpd\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977707 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-log-httpd\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977755 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-config-data\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.977839 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-scripts\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.978195 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-log-httpd\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.978252 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-run-httpd\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.982403 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-scripts\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.983006 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.983664 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.991269 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-config-data\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:15 crc kubenswrapper[4852]: I0129 11:02:15.994163 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7lqr\" (UniqueName: \"kubernetes.io/projected/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-kube-api-access-c7lqr\") pod \"ceilometer-0\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " pod="openstack/ceilometer-0" Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.087399 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.433334 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.470456 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.483062 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.483295 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-log" containerID="cri-o://816812f230b8d0a4042e6e45d4ac7ad6d929f47b4ccbc9ef05c1721695841714" gracePeriod=30 Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.483437 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-httpd" containerID="cri-o://fedc4b54914d87c6de3a8d3692807080d96fd9ab2c84838df3fc1677b015647e" gracePeriod=30 Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.550133 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5b5f6975bd-d8nwg"] Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.550398 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5b5f6975bd-d8nwg" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-log" containerID="cri-o://3949177a1a88dba32b3c67ee67ae43de9ff434d49383b5d5cc8f3b4d955a2354" gracePeriod=30 Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.550533 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5b5f6975bd-d8nwg" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-api" containerID="cri-o://d2ebe4dcbc8ae0bc7c37061797a6a738c81fd818fae0c31cb5e242d628b9995e" gracePeriod=30 Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.619132 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.651207 4852 generic.go:334] "Generic (PLEG): container finished" podID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerID="816812f230b8d0a4042e6e45d4ac7ad6d929f47b4ccbc9ef05c1721695841714" exitCode=143 Jan 29 11:02:16 crc kubenswrapper[4852]: I0129 11:02:16.651308 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"73161d78-6281-43c9-a807-b6bc7c0dde4b","Type":"ContainerDied","Data":"816812f230b8d0a4042e6e45d4ac7ad6d929f47b4ccbc9ef05c1721695841714"} Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.392129 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.392724 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-log" containerID="cri-o://c6609e5d726de654a9cb499621fa77544de875cce035a45226fea618a56c2edd" gracePeriod=30 Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.393252 4852 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/glance-default-internal-api-0" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-httpd" containerID="cri-o://074535fc1a848dd48010bf1cb1212242e2fdbd9fa2595875fd1eedda35702c32" gracePeriod=30 Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.473455 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be4b2847-5c03-4484-9df5-a3c65a5ffdfe" path="/var/lib/kubelet/pods/be4b2847-5c03-4484-9df5-a3c65a5ffdfe/volumes" Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.667426 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerStarted","Data":"0f27e0e1c191b873cf9a5bdf359d242c8b92b5d329b851cd102fd60f97115195"} Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.667468 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerStarted","Data":"101959c547f344e78cbd87ff9614a1cfff6b76cacf67df96a3a4e5ef5c6639d1"} Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.669478 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerID="c6609e5d726de654a9cb499621fa77544de875cce035a45226fea618a56c2edd" exitCode=143 Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.669558 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ca037db5-f99e-4aa3-9725-68ac7fce0bf8","Type":"ContainerDied","Data":"c6609e5d726de654a9cb499621fa77544de875cce035a45226fea618a56c2edd"} Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.671827 4852 generic.go:334] "Generic (PLEG): container finished" podID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerID="3949177a1a88dba32b3c67ee67ae43de9ff434d49383b5d5cc8f3b4d955a2354" exitCode=143 Jan 29 11:02:17 crc kubenswrapper[4852]: I0129 11:02:17.671872 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b5f6975bd-d8nwg" event={"ID":"41d17b94-5c6f-4736-b3bc-0a953abfc223","Type":"ContainerDied","Data":"3949177a1a88dba32b3c67ee67ae43de9ff434d49383b5d5cc8f3b4d955a2354"} Jan 29 11:02:18 crc kubenswrapper[4852]: I0129 11:02:18.058008 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:18 crc kubenswrapper[4852]: I0129 11:02:18.687136 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerStarted","Data":"00a321e95466df96dbb580df9226846873e53264250d5801c61aec95276f7daf"} Jan 29 11:02:19 crc kubenswrapper[4852]: I0129 11:02:19.698426 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerStarted","Data":"fce44640a9230b8755b90dcc98837f305ed839d01650b8f78aa56e8556597223"} Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.371119 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-v5wld"] Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.372469 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.377699 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-v95rw" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.377956 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.379291 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.387763 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-v5wld"] Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.490389 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.490540 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgg67\" (UniqueName: \"kubernetes.io/projected/29200a98-1d5c-4122-bfba-d66f4b12b5e0-kube-api-access-jgg67\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.490608 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-config-data\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.490767 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-scripts\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.594336 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-scripts\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.594694 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.594816 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgg67\" (UniqueName: \"kubernetes.io/projected/29200a98-1d5c-4122-bfba-d66f4b12b5e0-kube-api-access-jgg67\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: 
\"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.594867 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-config-data\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.601313 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.602342 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-scripts\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.606234 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-config-data\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.623164 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgg67\" (UniqueName: \"kubernetes.io/projected/29200a98-1d5c-4122-bfba-d66f4b12b5e0-kube-api-access-jgg67\") pod \"nova-cell0-conductor-db-sync-v5wld\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.716765 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.717195 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerID="074535fc1a848dd48010bf1cb1212242e2fdbd9fa2595875fd1eedda35702c32" exitCode=0 Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.717280 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ca037db5-f99e-4aa3-9725-68ac7fce0bf8","Type":"ContainerDied","Data":"074535fc1a848dd48010bf1cb1212242e2fdbd9fa2595875fd1eedda35702c32"} Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.719176 4852 generic.go:334] "Generic (PLEG): container finished" podID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerID="fedc4b54914d87c6de3a8d3692807080d96fd9ab2c84838df3fc1677b015647e" exitCode=0 Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.719215 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"73161d78-6281-43c9-a807-b6bc7c0dde4b","Type":"ContainerDied","Data":"fedc4b54914d87c6de3a8d3692807080d96fd9ab2c84838df3fc1677b015647e"} Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.727775 4852 generic.go:334] "Generic (PLEG): container finished" podID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerID="d2ebe4dcbc8ae0bc7c37061797a6a738c81fd818fae0c31cb5e242d628b9995e" exitCode=0 Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.727813 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b5f6975bd-d8nwg" event={"ID":"41d17b94-5c6f-4736-b3bc-0a953abfc223","Type":"ContainerDied","Data":"d2ebe4dcbc8ae0bc7c37061797a6a738c81fd818fae0c31cb5e242d628b9995e"} Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.927109 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:02:20 crc kubenswrapper[4852]: I0129 11:02:20.947502 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002245 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb6wx\" (UniqueName: \"kubernetes.io/projected/73161d78-6281-43c9-a807-b6bc7c0dde4b-kube-api-access-nb6wx\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002302 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-httpd-run\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002341 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-internal-tls-certs\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002385 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-logs\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002400 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-public-tls-certs\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002427 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-public-tls-certs\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002489 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-combined-ca-bundle\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002518 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-combined-ca-bundle\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002542 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002566 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-scripts\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " 
Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002634 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-config-data\") pod \"73161d78-6281-43c9-a807-b6bc7c0dde4b\" (UID: \"73161d78-6281-43c9-a807-b6bc7c0dde4b\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002659 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-config-data\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002674 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41d17b94-5c6f-4736-b3bc-0a953abfc223-logs\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002703 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfsk7\" (UniqueName: \"kubernetes.io/projected/41d17b94-5c6f-4736-b3bc-0a953abfc223-kube-api-access-mfsk7\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.002723 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-scripts\") pod \"41d17b94-5c6f-4736-b3bc-0a953abfc223\" (UID: \"41d17b94-5c6f-4736-b3bc-0a953abfc223\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.006048 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-logs" (OuterVolumeSpecName: "logs") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.006365 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.007018 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41d17b94-5c6f-4736-b3bc-0a953abfc223-logs" (OuterVolumeSpecName: "logs") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.026897 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73161d78-6281-43c9-a807-b6bc7c0dde4b-kube-api-access-nb6wx" (OuterVolumeSpecName: "kube-api-access-nb6wx") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "kube-api-access-nb6wx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.045396 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-scripts" (OuterVolumeSpecName: "scripts") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.045474 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.045521 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41d17b94-5c6f-4736-b3bc-0a953abfc223-kube-api-access-mfsk7" (OuterVolumeSpecName: "kube-api-access-mfsk7") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "kube-api-access-mfsk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.067944 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-scripts" (OuterVolumeSpecName: "scripts") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.087701 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-config-data" (OuterVolumeSpecName: "config-data") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105612 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105648 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41d17b94-5c6f-4736-b3bc-0a953abfc223-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105660 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfsk7\" (UniqueName: \"kubernetes.io/projected/41d17b94-5c6f-4736-b3bc-0a953abfc223-kube-api-access-mfsk7\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105676 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105686 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb6wx\" (UniqueName: \"kubernetes.io/projected/73161d78-6281-43c9-a807-b6bc7c0dde4b-kube-api-access-nb6wx\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105696 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105706 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73161d78-6281-43c9-a807-b6bc7c0dde4b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105732 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.105743 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.159123 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.188132 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.205003 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.208094 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.208137 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.208151 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.212629 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-config-data" (OuterVolumeSpecName: "config-data") pod "73161d78-6281-43c9-a807-b6bc7c0dde4b" (UID: "73161d78-6281-43c9-a807-b6bc7c0dde4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.243903 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.247359 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.260409 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309352 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-scripts\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309449 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-internal-tls-certs\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309650 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-logs\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309687 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309730 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-httpd-run\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309856 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-combined-ca-bundle\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309890 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-config-data\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.309924 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjm5t\" (UniqueName: \"kubernetes.io/projected/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-kube-api-access-qjm5t\") pod \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\" (UID: \"ca037db5-f99e-4aa3-9725-68ac7fce0bf8\") " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.310545 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.310574 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.310618 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/73161d78-6281-43c9-a807-b6bc7c0dde4b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.310694 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "41d17b94-5c6f-4736-b3bc-0a953abfc223" (UID: "41d17b94-5c6f-4736-b3bc-0a953abfc223"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.313132 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-logs" (OuterVolumeSpecName: "logs") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.315694 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.317509 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-scripts" (OuterVolumeSpecName: "scripts") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.317701 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-kube-api-access-qjm5t" (OuterVolumeSpecName: "kube-api-access-qjm5t") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "kube-api-access-qjm5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.319295 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.358180 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.362635 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.391265 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-config-data" (OuterVolumeSpecName: "config-data") pod "ca037db5-f99e-4aa3-9725-68ac7fce0bf8" (UID: "ca037db5-f99e-4aa3-9725-68ac7fce0bf8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412615 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412655 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412665 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjm5t\" (UniqueName: \"kubernetes.io/projected/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-kube-api-access-qjm5t\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412677 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412686 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412695 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/41d17b94-5c6f-4736-b3bc-0a953abfc223-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412704 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412740 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.412748 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ca037db5-f99e-4aa3-9725-68ac7fce0bf8-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.434435 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-v5wld"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.437760 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.514772 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 
11:02:21.738240 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-v5wld" event={"ID":"29200a98-1d5c-4122-bfba-d66f4b12b5e0","Type":"ContainerStarted","Data":"51169a1446501d1f89e0dea2444310e72ade6c64521f7fefd87b3da5f4da576e"} Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.741612 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-central-agent" containerID="cri-o://0f27e0e1c191b873cf9a5bdf359d242c8b92b5d329b851cd102fd60f97115195" gracePeriod=30 Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.741724 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-notification-agent" containerID="cri-o://00a321e95466df96dbb580df9226846873e53264250d5801c61aec95276f7daf" gracePeriod=30 Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.741800 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="proxy-httpd" containerID="cri-o://2d505cc7b9ca5f2381c34c27ddcf1b5d32cfc7d4616586ea617a901e0da0d825" gracePeriod=30 Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.741439 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerStarted","Data":"2d505cc7b9ca5f2381c34c27ddcf1b5d32cfc7d4616586ea617a901e0da0d825"} Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.742025 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.741686 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="sg-core" containerID="cri-o://fce44640a9230b8755b90dcc98837f305ed839d01650b8f78aa56e8556597223" gracePeriod=30 Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.743697 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ca037db5-f99e-4aa3-9725-68ac7fce0bf8","Type":"ContainerDied","Data":"87111d58a02619facf35301fbfb476e198cf5647b0ab4f8c56a34cd15946f223"} Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.743732 4852 scope.go:117] "RemoveContainer" containerID="074535fc1a848dd48010bf1cb1212242e2fdbd9fa2595875fd1eedda35702c32" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.743790 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.758071 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"73161d78-6281-43c9-a807-b6bc7c0dde4b","Type":"ContainerDied","Data":"0d16749121d381bac9154c29947c4f92ada72907eb2790138f57ecde3561c442"} Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.758184 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.768990 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5b5f6975bd-d8nwg" event={"ID":"41d17b94-5c6f-4736-b3bc-0a953abfc223","Type":"ContainerDied","Data":"e4f94c23b1be17e7f0be8e2fa9ddd10dc135dc085590b6bf97c60da54aeeada5"} Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.769062 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5b5f6975bd-d8nwg" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.775004 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.957618977 podStartE2EDuration="6.774985054s" podCreationTimestamp="2026-01-29 11:02:15 +0000 UTC" firstStartedPulling="2026-01-29 11:02:16.645523511 +0000 UTC m=+1233.862854655" lastFinishedPulling="2026-01-29 11:02:20.462889598 +0000 UTC m=+1237.680220732" observedRunningTime="2026-01-29 11:02:21.769891429 +0000 UTC m=+1238.987222563" watchObservedRunningTime="2026-01-29 11:02:21.774985054 +0000 UTC m=+1238.992316188" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.802531 4852 scope.go:117] "RemoveContainer" containerID="c6609e5d726de654a9cb499621fa77544de875cce035a45226fea618a56c2edd" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.810620 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.818851 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.849407 4852 scope.go:117] "RemoveContainer" containerID="fedc4b54914d87c6de3a8d3692807080d96fd9ab2c84838df3fc1677b015647e" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.849595 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.870301 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.890460 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: E0129 11:02:21.891019 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-api" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891089 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-api" Jan 29 11:02:21 crc kubenswrapper[4852]: E0129 11:02:21.891142 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-log" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891192 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-log" Jan 29 11:02:21 crc kubenswrapper[4852]: E0129 11:02:21.891253 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-log" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891304 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-log" Jan 29 11:02:21 crc 
kubenswrapper[4852]: E0129 11:02:21.891389 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-httpd" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891449 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-httpd" Jan 29 11:02:21 crc kubenswrapper[4852]: E0129 11:02:21.891505 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-log" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891557 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-log" Jan 29 11:02:21 crc kubenswrapper[4852]: E0129 11:02:21.891652 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-httpd" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891708 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-httpd" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.891925 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-httpd" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.892013 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" containerName="glance-log" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.892091 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-log" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.892154 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-log" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.892207 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" containerName="glance-httpd" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.892262 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" containerName="placement-api" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.893617 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.896160 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.896384 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-php2d" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.896669 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.897223 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.907893 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5b5f6975bd-d8nwg"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.912113 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5b5f6975bd-d8nwg"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920171 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfx4v\" (UniqueName: \"kubernetes.io/projected/232868d3-4c67-4820-b75c-e90009acf440-kube-api-access-dfx4v\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920219 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920240 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-logs\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920350 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920404 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-scripts\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920425 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-config-data\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 
11:02:21.920442 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.920463 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.924862 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.926395 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.928986 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.929161 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.935530 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.949542 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:02:21 crc kubenswrapper[4852]: I0129 11:02:21.950961 4852 scope.go:117] "RemoveContainer" containerID="816812f230b8d0a4042e6e45d4ac7ad6d929f47b4ccbc9ef05c1721695841714" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.003937 4852 scope.go:117] "RemoveContainer" containerID="d2ebe4dcbc8ae0bc7c37061797a6a738c81fd818fae0c31cb5e242d628b9995e" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021644 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021707 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021767 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfx4v\" (UniqueName: \"kubernetes.io/projected/232868d3-4c67-4820-b75c-e90009acf440-kube-api-access-dfx4v\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021804 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021826 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-logs\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021923 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.021940 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.022315 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-scripts\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.022424 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.022820 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-config-data\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.023297 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-logs\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.028508 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-config-data\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.034359 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.034651 4852 scope.go:117] "RemoveContainer" containerID="3949177a1a88dba32b3c67ee67ae43de9ff434d49383b5d5cc8f3b4d955a2354" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.039136 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.039779 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.039807 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfx4v\" (UniqueName: \"kubernetes.io/projected/232868d3-4c67-4820-b75c-e90009acf440-kube-api-access-dfx4v\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.052745 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124229 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124296 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124452 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124509 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-logs\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124725 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b574\" (UniqueName: \"kubernetes.io/projected/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-kube-api-access-6b574\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124773 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124922 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.124994 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.224415 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226024 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b574\" (UniqueName: \"kubernetes.io/projected/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-kube-api-access-6b574\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226058 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226132 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226176 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226218 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: 
\"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226270 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226323 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226352 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-logs\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.226926 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.227522 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-logs\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.228605 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.232147 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.232570 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.234209 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc 
kubenswrapper[4852]: I0129 11:02:22.235117 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.243705 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b574\" (UniqueName: \"kubernetes.io/projected/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-kube-api-access-6b574\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.261603 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.541255 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.782489 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerID="2d505cc7b9ca5f2381c34c27ddcf1b5d32cfc7d4616586ea617a901e0da0d825" exitCode=0 Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.782528 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerID="fce44640a9230b8755b90dcc98837f305ed839d01650b8f78aa56e8556597223" exitCode=2 Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.782539 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerID="00a321e95466df96dbb580df9226846873e53264250d5801c61aec95276f7daf" exitCode=0 Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.782558 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerDied","Data":"2d505cc7b9ca5f2381c34c27ddcf1b5d32cfc7d4616586ea617a901e0da0d825"} Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.782628 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerDied","Data":"fce44640a9230b8755b90dcc98837f305ed839d01650b8f78aa56e8556597223"} Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.782638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerDied","Data":"00a321e95466df96dbb580df9226846873e53264250d5801c61aec95276f7daf"} Jan 29 11:02:22 crc kubenswrapper[4852]: I0129 11:02:22.827630 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.003348 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:02:23 crc kubenswrapper[4852]: W0129 11:02:23.026952 4852 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda29eddc2_c6bd_46c0_ba00_5a08b8b6793e.slice/crio-2021a6739ab6a8fc799c4e44a58a0f73eec73d8aa20969e0e2e36294cda2671e WatchSource:0}: Error finding container 2021a6739ab6a8fc799c4e44a58a0f73eec73d8aa20969e0e2e36294cda2671e: Status 404 returned error can't find the container with id 2021a6739ab6a8fc799c4e44a58a0f73eec73d8aa20969e0e2e36294cda2671e Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.506281 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41d17b94-5c6f-4736-b3bc-0a953abfc223" path="/var/lib/kubelet/pods/41d17b94-5c6f-4736-b3bc-0a953abfc223/volumes" Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.507526 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73161d78-6281-43c9-a807-b6bc7c0dde4b" path="/var/lib/kubelet/pods/73161d78-6281-43c9-a807-b6bc7c0dde4b/volumes" Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.508505 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca037db5-f99e-4aa3-9725-68ac7fce0bf8" path="/var/lib/kubelet/pods/ca037db5-f99e-4aa3-9725-68ac7fce0bf8/volumes" Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.802410 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"232868d3-4c67-4820-b75c-e90009acf440","Type":"ContainerStarted","Data":"d75357c4635ac4b27b487dfd891bbe8e4a30c70a0f91635d4c51f8b3a4c92c2b"} Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.802790 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"232868d3-4c67-4820-b75c-e90009acf440","Type":"ContainerStarted","Data":"3e8cb7241d510c2d3750ad6e18056562f6adb3fec55eb5c38ef0e670cbcc97e0"} Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.806423 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e","Type":"ContainerStarted","Data":"ce3f6c95a078dc3bea52fa656715a9c015827b3d5a2ef4d87997ac240eeab0e7"} Jan 29 11:02:23 crc kubenswrapper[4852]: I0129 11:02:23.806452 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e","Type":"ContainerStarted","Data":"2021a6739ab6a8fc799c4e44a58a0f73eec73d8aa20969e0e2e36294cda2671e"} Jan 29 11:02:24 crc kubenswrapper[4852]: I0129 11:02:24.817823 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e","Type":"ContainerStarted","Data":"d56126df6c76ee4d2b57d1a9bfa70a3c707884469080624229983943f11c8570"} Jan 29 11:02:24 crc kubenswrapper[4852]: I0129 11:02:24.821652 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"232868d3-4c67-4820-b75c-e90009acf440","Type":"ContainerStarted","Data":"496427d75669b53712641a841d59135547182e1cbb1fb27f1360eb43642abdda"} Jan 29 11:02:24 crc kubenswrapper[4852]: I0129 11:02:24.873127 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.8731008080000002 podStartE2EDuration="3.873100808s" podCreationTimestamp="2026-01-29 11:02:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:24.86625009 +0000 UTC m=+1242.083581234" 
watchObservedRunningTime="2026-01-29 11:02:24.873100808 +0000 UTC m=+1242.090431952" Jan 29 11:02:24 crc kubenswrapper[4852]: I0129 11:02:24.873896 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.873872026 podStartE2EDuration="3.873872026s" podCreationTimestamp="2026-01-29 11:02:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:24.849282346 +0000 UTC m=+1242.066613470" watchObservedRunningTime="2026-01-29 11:02:24.873872026 +0000 UTC m=+1242.091203160" Jan 29 11:02:25 crc kubenswrapper[4852]: I0129 11:02:25.834487 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerID="0f27e0e1c191b873cf9a5bdf359d242c8b92b5d329b851cd102fd60f97115195" exitCode=0 Jan 29 11:02:25 crc kubenswrapper[4852]: I0129 11:02:25.834602 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerDied","Data":"0f27e0e1c191b873cf9a5bdf359d242c8b92b5d329b851cd102fd60f97115195"} Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.184234 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.266503 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-combined-ca-bundle\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.266829 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-run-httpd\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.266957 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-scripts\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.267107 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-sg-core-conf-yaml\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.267211 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7lqr\" (UniqueName: \"kubernetes.io/projected/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-kube-api-access-c7lqr\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.267307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-config-data\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.267393 
4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-log-httpd\") pod \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\" (UID: \"9d51bc65-d195-46dc-ab26-5b39da3c0f7b\") " Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.267530 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.267999 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.268544 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.272167 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-kube-api-access-c7lqr" (OuterVolumeSpecName: "kube-api-access-c7lqr") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "kube-api-access-c7lqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.272269 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-scripts" (OuterVolumeSpecName: "scripts") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.314669 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.357487 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.369357 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.369397 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.369411 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.369423 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.369437 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7lqr\" (UniqueName: \"kubernetes.io/projected/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-kube-api-access-c7lqr\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.392617 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-config-data" (OuterVolumeSpecName: "config-data") pod "9d51bc65-d195-46dc-ab26-5b39da3c0f7b" (UID: "9d51bc65-d195-46dc-ab26-5b39da3c0f7b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.470438 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d51bc65-d195-46dc-ab26-5b39da3c0f7b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.913517 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-v5wld" event={"ID":"29200a98-1d5c-4122-bfba-d66f4b12b5e0","Type":"ContainerStarted","Data":"f13b266639c225546baf5b7b3099ea5770dc5dd5ff4c67f836f6b1c6c8367019"} Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.917771 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d51bc65-d195-46dc-ab26-5b39da3c0f7b","Type":"ContainerDied","Data":"101959c547f344e78cbd87ff9614a1cfff6b76cacf67df96a3a4e5ef5c6639d1"} Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.917823 4852 scope.go:117] "RemoveContainer" containerID="2d505cc7b9ca5f2381c34c27ddcf1b5d32cfc7d4616586ea617a901e0da0d825" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.917955 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.930273 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-v5wld" podStartSLOduration=2.117049986 podStartE2EDuration="9.930254002s" podCreationTimestamp="2026-01-29 11:02:20 +0000 UTC" firstStartedPulling="2026-01-29 11:02:21.433020615 +0000 UTC m=+1238.650351749" lastFinishedPulling="2026-01-29 11:02:29.246224631 +0000 UTC m=+1246.463555765" observedRunningTime="2026-01-29 11:02:29.929146455 +0000 UTC m=+1247.146477579" watchObservedRunningTime="2026-01-29 11:02:29.930254002 +0000 UTC m=+1247.147585136" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.949771 4852 scope.go:117] "RemoveContainer" containerID="fce44640a9230b8755b90dcc98837f305ed839d01650b8f78aa56e8556597223" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.968460 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.981653 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.991120 4852 scope.go:117] "RemoveContainer" containerID="00a321e95466df96dbb580df9226846873e53264250d5801c61aec95276f7daf" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.997280 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:29 crc kubenswrapper[4852]: E0129 11:02:29.997712 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-notification-agent" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.997734 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-notification-agent" Jan 29 11:02:29 crc kubenswrapper[4852]: E0129 11:02:29.997777 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="sg-core" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.997786 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="sg-core" Jan 29 11:02:29 crc kubenswrapper[4852]: E0129 11:02:29.997798 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-central-agent" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.997807 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-central-agent" Jan 29 11:02:29 crc kubenswrapper[4852]: E0129 11:02:29.997833 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="proxy-httpd" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.997842 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="proxy-httpd" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.998042 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-notification-agent" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.998072 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="proxy-httpd" Jan 29 11:02:29 crc kubenswrapper[4852]: 
I0129 11:02:29.998087 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="sg-core" Jan 29 11:02:29 crc kubenswrapper[4852]: I0129 11:02:29.998100 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" containerName="ceilometer-central-agent" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.000132 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.004032 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.004261 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.026144 4852 scope.go:117] "RemoveContainer" containerID="0f27e0e1c191b873cf9a5bdf359d242c8b92b5d329b851cd102fd60f97115195" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.026386 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107470 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107514 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrm4c\" (UniqueName: \"kubernetes.io/projected/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-kube-api-access-lrm4c\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107553 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-config-data\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107572 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-run-httpd\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107683 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-log-httpd\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107710 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-scripts\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.107807 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209538 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-log-httpd\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209634 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-scripts\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209685 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209723 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209752 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrm4c\" (UniqueName: \"kubernetes.io/projected/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-kube-api-access-lrm4c\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209804 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-config-data\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.209832 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-run-httpd\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.210071 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-log-httpd\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.210308 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-run-httpd\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.214912 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.215918 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-config-data\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.216938 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-scripts\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.224761 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.231114 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrm4c\" (UniqueName: \"kubernetes.io/projected/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-kube-api-access-lrm4c\") pod \"ceilometer-0\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.319185 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.777655 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:30 crc kubenswrapper[4852]: W0129 11:02:30.782254 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8af8e94_f9a1_4f5a_ba3a_0916ff9f0456.slice/crio-9af2692d586948c7ed36b367478bd72443225d65845f3596353ac8bf57080566 WatchSource:0}: Error finding container 9af2692d586948c7ed36b367478bd72443225d65845f3596353ac8bf57080566: Status 404 returned error can't find the container with id 9af2692d586948c7ed36b367478bd72443225d65845f3596353ac8bf57080566 Jan 29 11:02:30 crc kubenswrapper[4852]: I0129 11:02:30.928574 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerStarted","Data":"9af2692d586948c7ed36b367478bd72443225d65845f3596353ac8bf57080566"} Jan 29 11:02:31 crc kubenswrapper[4852]: I0129 11:02:31.401256 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:02:31 crc kubenswrapper[4852]: I0129 11:02:31.472265 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d51bc65-d195-46dc-ab26-5b39da3c0f7b" path="/var/lib/kubelet/pods/9d51bc65-d195-46dc-ab26-5b39da3c0f7b/volumes" Jan 29 11:02:31 crc kubenswrapper[4852]: I0129 11:02:31.958662 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerStarted","Data":"5f8fce3bcf739f9e07dc49117c782949efc448a9336705688d06b41810eb0c29"} Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.225095 4852 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.225469 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.257824 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.269531 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.542365 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.542429 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.569450 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.585037 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.982750 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerStarted","Data":"6ee122dee8f0a7226fd55a5feef48ff0fe98dc60f8c10f7012db4ecc52018411"} Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.982982 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.983083 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.983160 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerStarted","Data":"bea28cac6b094c8ec53ab97bca35cb15116452dad13ef07cdeeda51c512d43aa"} Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.983393 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 11:02:32 crc kubenswrapper[4852]: I0129 11:02:32.983423 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.985160 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.998771 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.999056 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-central-agent" containerID="cri-o://5f8fce3bcf739f9e07dc49117c782949efc448a9336705688d06b41810eb0c29" gracePeriod=30 Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.999356 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerStarted","Data":"e6181736252555859f974f1832bd23c419f205a5658446a9de578e5e2bf92917"} Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.999412 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.999448 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="proxy-httpd" containerID="cri-o://e6181736252555859f974f1832bd23c419f205a5658446a9de578e5e2bf92917" gracePeriod=30 Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.999515 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="sg-core" containerID="cri-o://6ee122dee8f0a7226fd55a5feef48ff0fe98dc60f8c10f7012db4ecc52018411" gracePeriod=30 Jan 29 11:02:34 crc kubenswrapper[4852]: I0129 11:02:34.999554 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-notification-agent" containerID="cri-o://bea28cac6b094c8ec53ab97bca35cb15116452dad13ef07cdeeda51c512d43aa" gracePeriod=30 Jan 29 11:02:35 crc kubenswrapper[4852]: I0129 11:02:35.032602 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.117090181 podStartE2EDuration="6.03256241s" podCreationTimestamp="2026-01-29 11:02:29 +0000 UTC" firstStartedPulling="2026-01-29 11:02:30.784546331 +0000 UTC m=+1248.001877475" lastFinishedPulling="2026-01-29 11:02:34.70001857 +0000 UTC m=+1251.917349704" observedRunningTime="2026-01-29 11:02:35.029157456 +0000 UTC m=+1252.246488590" watchObservedRunningTime="2026-01-29 11:02:35.03256241 +0000 UTC m=+1252.249893544" Jan 29 11:02:35 crc kubenswrapper[4852]: I0129 11:02:35.068440 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 11:02:35 crc kubenswrapper[4852]: I0129 11:02:35.172093 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 11:02:35 crc kubenswrapper[4852]: I0129 11:02:35.172252 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 11:02:35 crc kubenswrapper[4852]: I0129 11:02:35.183082 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 11:02:36 crc kubenswrapper[4852]: I0129 11:02:36.008549 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerID="6ee122dee8f0a7226fd55a5feef48ff0fe98dc60f8c10f7012db4ecc52018411" exitCode=2 Jan 29 11:02:36 crc kubenswrapper[4852]: I0129 11:02:36.008850 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerID="bea28cac6b094c8ec53ab97bca35cb15116452dad13ef07cdeeda51c512d43aa" exitCode=0 Jan 29 11:02:36 crc kubenswrapper[4852]: I0129 11:02:36.008603 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerDied","Data":"6ee122dee8f0a7226fd55a5feef48ff0fe98dc60f8c10f7012db4ecc52018411"} Jan 29 11:02:36 crc kubenswrapper[4852]: I0129 11:02:36.008928 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerDied","Data":"bea28cac6b094c8ec53ab97bca35cb15116452dad13ef07cdeeda51c512d43aa"} Jan 29 11:02:37 crc kubenswrapper[4852]: I0129 11:02:37.026982 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerID="5f8fce3bcf739f9e07dc49117c782949efc448a9336705688d06b41810eb0c29" exitCode=0 Jan 29 11:02:37 crc kubenswrapper[4852]: I0129 11:02:37.027050 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerDied","Data":"5f8fce3bcf739f9e07dc49117c782949efc448a9336705688d06b41810eb0c29"} Jan 29 11:02:42 crc kubenswrapper[4852]: I0129 11:02:42.094019 4852 generic.go:334] "Generic (PLEG): container finished" podID="29200a98-1d5c-4122-bfba-d66f4b12b5e0" containerID="f13b266639c225546baf5b7b3099ea5770dc5dd5ff4c67f836f6b1c6c8367019" exitCode=0 Jan 29 11:02:42 crc kubenswrapper[4852]: I0129 11:02:42.094082 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-v5wld" event={"ID":"29200a98-1d5c-4122-bfba-d66f4b12b5e0","Type":"ContainerDied","Data":"f13b266639c225546baf5b7b3099ea5770dc5dd5ff4c67f836f6b1c6c8367019"} Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.450393 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.477980 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgg67\" (UniqueName: \"kubernetes.io/projected/29200a98-1d5c-4122-bfba-d66f4b12b5e0-kube-api-access-jgg67\") pod \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.478062 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-config-data\") pod \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.478081 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-scripts\") pod \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.478103 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-combined-ca-bundle\") pod \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\" (UID: \"29200a98-1d5c-4122-bfba-d66f4b12b5e0\") " Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.486725 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-scripts" (OuterVolumeSpecName: "scripts") pod "29200a98-1d5c-4122-bfba-d66f4b12b5e0" (UID: "29200a98-1d5c-4122-bfba-d66f4b12b5e0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.486902 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29200a98-1d5c-4122-bfba-d66f4b12b5e0-kube-api-access-jgg67" (OuterVolumeSpecName: "kube-api-access-jgg67") pod "29200a98-1d5c-4122-bfba-d66f4b12b5e0" (UID: "29200a98-1d5c-4122-bfba-d66f4b12b5e0"). InnerVolumeSpecName "kube-api-access-jgg67". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.509726 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-config-data" (OuterVolumeSpecName: "config-data") pod "29200a98-1d5c-4122-bfba-d66f4b12b5e0" (UID: "29200a98-1d5c-4122-bfba-d66f4b12b5e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.514715 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29200a98-1d5c-4122-bfba-d66f4b12b5e0" (UID: "29200a98-1d5c-4122-bfba-d66f4b12b5e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.580400 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgg67\" (UniqueName: \"kubernetes.io/projected/29200a98-1d5c-4122-bfba-d66f4b12b5e0-kube-api-access-jgg67\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.580431 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.580441 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:43 crc kubenswrapper[4852]: I0129 11:02:43.580450 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29200a98-1d5c-4122-bfba-d66f4b12b5e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.114751 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-v5wld" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.114805 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-v5wld" event={"ID":"29200a98-1d5c-4122-bfba-d66f4b12b5e0","Type":"ContainerDied","Data":"51169a1446501d1f89e0dea2444310e72ade6c64521f7fefd87b3da5f4da576e"} Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.115176 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51169a1446501d1f89e0dea2444310e72ade6c64521f7fefd87b3da5f4da576e" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.209990 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 11:02:44 crc kubenswrapper[4852]: E0129 11:02:44.210754 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29200a98-1d5c-4122-bfba-d66f4b12b5e0" containerName="nova-cell0-conductor-db-sync" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.210779 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="29200a98-1d5c-4122-bfba-d66f4b12b5e0" containerName="nova-cell0-conductor-db-sync" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.210995 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="29200a98-1d5c-4122-bfba-d66f4b12b5e0" containerName="nova-cell0-conductor-db-sync" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.212682 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.215166 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-v95rw" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.215378 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.230061 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.394210 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.394273 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4g8l\" (UniqueName: \"kubernetes.io/projected/2d824719-4789-4d55-a1ec-2602e98d8b53-kube-api-access-v4g8l\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.394355 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.496711 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-combined-ca-bundle\") pod 
\"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.496923 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.496960 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4g8l\" (UniqueName: \"kubernetes.io/projected/2d824719-4789-4d55-a1ec-2602e98d8b53-kube-api-access-v4g8l\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.501214 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.501427 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.512460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4g8l\" (UniqueName: \"kubernetes.io/projected/2d824719-4789-4d55-a1ec-2602e98d8b53-kube-api-access-v4g8l\") pod \"nova-cell0-conductor-0\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:44 crc kubenswrapper[4852]: I0129 11:02:44.533833 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:45 crc kubenswrapper[4852]: I0129 11:02:45.036087 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 11:02:45 crc kubenswrapper[4852]: I0129 11:02:45.123509 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2d824719-4789-4d55-a1ec-2602e98d8b53","Type":"ContainerStarted","Data":"413d1e7d316d1a4b8f32935fdbd849c634e863043fc47253597c844e049d9e91"} Jan 29 11:02:46 crc kubenswrapper[4852]: I0129 11:02:46.133497 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2d824719-4789-4d55-a1ec-2602e98d8b53","Type":"ContainerStarted","Data":"709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e"} Jan 29 11:02:46 crc kubenswrapper[4852]: I0129 11:02:46.134039 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:46 crc kubenswrapper[4852]: I0129 11:02:46.157382 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.157362941 podStartE2EDuration="2.157362941s" podCreationTimestamp="2026-01-29 11:02:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:46.148522695 +0000 UTC m=+1263.365853829" watchObservedRunningTime="2026-01-29 11:02:46.157362941 +0000 UTC m=+1263.374694075" Jan 29 11:02:54 crc kubenswrapper[4852]: I0129 11:02:54.564274 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.243543 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-nwllr"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.245100 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.248273 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.248273 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.252516 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-nwllr"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.400108 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p56wq\" (UniqueName: \"kubernetes.io/projected/2ce36949-28e5-43d1-ae87-7cf9113cc884-kube-api-access-p56wq\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.400158 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-config-data\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.400203 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.400271 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-scripts\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.439368 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.440544 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.443100 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.451895 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.495973 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.497526 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.511007 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.511255 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-scripts\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.511500 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p56wq\" (UniqueName: \"kubernetes.io/projected/2ce36949-28e5-43d1-ae87-7cf9113cc884-kube-api-access-p56wq\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.511534 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-config-data\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.513858 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.519168 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-config-data\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.520176 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-scripts\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.523512 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.544796 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.550981 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p56wq\" (UniqueName: \"kubernetes.io/projected/2ce36949-28e5-43d1-ae87-7cf9113cc884-kube-api-access-p56wq\") pod \"nova-cell0-cell-mapping-nwllr\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.570010 4852 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.599126 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-blg6c"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.601891 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.613001 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-blg6c"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.613996 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071d024d-c42c-405b-b565-a0cd6e82aa71-logs\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.614041 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.614087 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-config-data\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.614125 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-config-data\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.614141 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.614157 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgfsw\" (UniqueName: \"kubernetes.io/projected/071d024d-c42c-405b-b565-a0cd6e82aa71-kube-api-access-hgfsw\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.614180 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh9wc\" (UniqueName: \"kubernetes.io/projected/d5a6901f-6ad2-4a55-979c-5af162a11e87-kube-api-access-vh9wc\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.648089 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.649335 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.652237 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.669587 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.675808 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.678003 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.689292 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.702640 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.716362 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.716526 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-config-data\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717384 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-config-data\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717426 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717476 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgfsw\" (UniqueName: \"kubernetes.io/projected/071d024d-c42c-405b-b565-a0cd6e82aa71-kube-api-access-hgfsw\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717567 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh9wc\" (UniqueName: \"kubernetes.io/projected/d5a6901f-6ad2-4a55-979c-5af162a11e87-kube-api-access-vh9wc\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717644 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-svc\") pod 
\"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717680 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717710 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wbdm\" (UniqueName: \"kubernetes.io/projected/3a1d369c-e67c-4fa5-897f-38a385e6841b-kube-api-access-5wbdm\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717767 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717917 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071d024d-c42c-405b-b565-a0cd6e82aa71-logs\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717955 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-config\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.717983 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.722044 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-config-data\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.723296 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.725198 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071d024d-c42c-405b-b565-a0cd6e82aa71-logs\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc 
kubenswrapper[4852]: I0129 11:02:55.731702 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.737189 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-config-data\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.740819 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh9wc\" (UniqueName: \"kubernetes.io/projected/d5a6901f-6ad2-4a55-979c-5af162a11e87-kube-api-access-vh9wc\") pod \"nova-scheduler-0\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.746706 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgfsw\" (UniqueName: \"kubernetes.io/projected/071d024d-c42c-405b-b565-a0cd6e82aa71-kube-api-access-hgfsw\") pod \"nova-metadata-0\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " pod="openstack/nova-metadata-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.761013 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.820057 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.820303 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqzsx\" (UniqueName: \"kubernetes.io/projected/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-kube-api-access-kqzsx\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.820337 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-svc\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.820358 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.820379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wbdm\" (UniqueName: \"kubernetes.io/projected/3a1d369c-e67c-4fa5-897f-38a385e6841b-kube-api-access-5wbdm\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " 
pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.820397 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821302 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkjm6\" (UniqueName: \"kubernetes.io/projected/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-kube-api-access-pkjm6\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821416 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-config\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821435 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-config-data\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821476 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821551 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821599 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-logs\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.821628 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.823095 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-svc\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.823969 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.824836 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-config\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.825743 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.826173 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.848063 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wbdm\" (UniqueName: \"kubernetes.io/projected/3a1d369c-e67c-4fa5-897f-38a385e6841b-kube-api-access-5wbdm\") pod \"dnsmasq-dns-865f5d856f-blg6c\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.923899 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-config-data\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.923943 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.923984 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-logs\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.924014 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.924082 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.924125 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqzsx\" (UniqueName: \"kubernetes.io/projected/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-kube-api-access-kqzsx\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.924709 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-logs\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.924894 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkjm6\" (UniqueName: \"kubernetes.io/projected/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-kube-api-access-pkjm6\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.932386 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.935107 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.946242 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.946865 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkjm6\" (UniqueName: \"kubernetes.io/projected/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-kube-api-access-pkjm6\") pod \"nova-cell1-novncproxy-0\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.958502 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-config-data\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.968878 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqzsx\" (UniqueName: \"kubernetes.io/projected/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-kube-api-access-kqzsx\") pod \"nova-api-0\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " pod="openstack/nova-api-0" Jan 29 11:02:55 crc kubenswrapper[4852]: I0129 11:02:55.985267 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.014707 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.086066 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.099274 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.113396 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-nwllr"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.235794 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nwllr" event={"ID":"2ce36949-28e5-43d1-ae87-7cf9113cc884","Type":"ContainerStarted","Data":"fef5b0bf9b014fe63c2f9c8a62c1d8c4bb9e45522915a5d3e8f9465a5eb3251e"} Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.384047 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.483797 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-nt5bd"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.485944 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.490102 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.491075 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.501103 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-nt5bd"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.631170 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.651865 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-config-data\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.651998 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-scripts\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.652024 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.652486 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g5vf\" (UniqueName: \"kubernetes.io/projected/09e692ef-fb62-44d3-8b88-09fa15eaae6f-kube-api-access-9g5vf\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.695471 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-blg6c"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.756199 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g5vf\" (UniqueName: \"kubernetes.io/projected/09e692ef-fb62-44d3-8b88-09fa15eaae6f-kube-api-access-9g5vf\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.756309 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-config-data\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.756349 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-scripts\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.756368 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.763033 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.763613 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-config-data\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.763961 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-scripts\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.779597 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.781765 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9g5vf\" (UniqueName: \"kubernetes.io/projected/09e692ef-fb62-44d3-8b88-09fa15eaae6f-kube-api-access-9g5vf\") pod \"nova-cell1-conductor-db-sync-nt5bd\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.819277 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:02:56 crc kubenswrapper[4852]: I0129 11:02:56.827646 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.253527 4852 generic.go:334] "Generic (PLEG): container finished" podID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerID="f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7" exitCode=0 Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.253635 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" event={"ID":"3a1d369c-e67c-4fa5-897f-38a385e6841b","Type":"ContainerDied","Data":"f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.253826 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" event={"ID":"3a1d369c-e67c-4fa5-897f-38a385e6841b","Type":"ContainerStarted","Data":"471c51ea4bca1f7aa9cbbdefbd79d8c7efce62b906ff81f8edcc5b08ad3e1613"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.263455 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5a6901f-6ad2-4a55-979c-5af162a11e87","Type":"ContainerStarted","Data":"10bfd56d573936878281c8e99381c7a3e9e571563e9844ba8d43c6c06a85cf19"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.266896 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"071d024d-c42c-405b-b565-a0cd6e82aa71","Type":"ContainerStarted","Data":"a42953459a1c3742453f9c3d574a10a4df019f776d3ae1434f0004b7359fa7fc"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.289859 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7","Type":"ContainerStarted","Data":"3f5f9df82850faa8f5e824b99dfb497a37bf276ca642065b32f694b424bfbfd3"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.307880 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nwllr" event={"ID":"2ce36949-28e5-43d1-ae87-7cf9113cc884","Type":"ContainerStarted","Data":"b5db0dc9d125bb6bd9a82455645892cb37b96d3694ff50709c1a00e7c92e58ab"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.319417 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d","Type":"ContainerStarted","Data":"f222946a84b5e67fcaf5003e2d41ee333864c90aa5361fbe4f38d17d27e0d985"} Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.343557 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-nt5bd"] Jan 29 11:02:57 crc kubenswrapper[4852]: I0129 11:02:57.348033 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-nwllr" podStartSLOduration=2.348011591 podStartE2EDuration="2.348011591s" podCreationTimestamp="2026-01-29 11:02:55 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:57.336873179 +0000 UTC m=+1274.554204313" watchObservedRunningTime="2026-01-29 11:02:57.348011591 +0000 UTC m=+1274.565342735" Jan 29 11:02:58 crc kubenswrapper[4852]: I0129 11:02:58.333844 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" event={"ID":"09e692ef-fb62-44d3-8b88-09fa15eaae6f","Type":"ContainerStarted","Data":"997dcd23b056aaad9a977198f37d86b72e4af2e2a523b009c2a3efd396195c6c"} Jan 29 11:02:58 crc kubenswrapper[4852]: I0129 11:02:58.333912 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" event={"ID":"09e692ef-fb62-44d3-8b88-09fa15eaae6f","Type":"ContainerStarted","Data":"f3bc4cc541657d39e1be2dda2a169324c0209d0c97a1b923dcf6c81a58024369"} Jan 29 11:02:58 crc kubenswrapper[4852]: I0129 11:02:58.341891 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" event={"ID":"3a1d369c-e67c-4fa5-897f-38a385e6841b","Type":"ContainerStarted","Data":"66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85"} Jan 29 11:02:58 crc kubenswrapper[4852]: I0129 11:02:58.342041 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:02:58 crc kubenswrapper[4852]: I0129 11:02:58.358704 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" podStartSLOduration=2.358685577 podStartE2EDuration="2.358685577s" podCreationTimestamp="2026-01-29 11:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:58.352565228 +0000 UTC m=+1275.569896372" watchObservedRunningTime="2026-01-29 11:02:58.358685577 +0000 UTC m=+1275.576016701" Jan 29 11:02:59 crc kubenswrapper[4852]: I0129 11:02:59.503667 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" podStartSLOduration=4.503641722 podStartE2EDuration="4.503641722s" podCreationTimestamp="2026-01-29 11:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:02:58.384205551 +0000 UTC m=+1275.601536685" watchObservedRunningTime="2026-01-29 11:02:59.503641722 +0000 UTC m=+1276.720972866" Jan 29 11:02:59 crc kubenswrapper[4852]: I0129 11:02:59.509123 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:02:59 crc kubenswrapper[4852]: I0129 11:02:59.520069 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:00 crc kubenswrapper[4852]: I0129 11:03:00.332250 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.396823 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"071d024d-c42c-405b-b565-a0cd6e82aa71","Type":"ContainerStarted","Data":"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e"} Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.397235 4852 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"071d024d-c42c-405b-b565-a0cd6e82aa71","Type":"ContainerStarted","Data":"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0"} Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.397187 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-log" containerID="cri-o://9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0" gracePeriod=30 Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.397435 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-metadata" containerID="cri-o://c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e" gracePeriod=30 Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.406235 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7","Type":"ContainerStarted","Data":"e238f6e4fb8be6b2a6e4c0cdf1fbdffd1010b03584ce000320137c375372d096"} Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.406295 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7","Type":"ContainerStarted","Data":"c745dfab9a29fbe2967f87a7ce5cec429071fe93027eb998fd737fe8ea427801"} Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.408985 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d","Type":"ContainerStarted","Data":"60ea2d5c51642d661fc0ac3419e0195000e0220cd10f70080639da96696f526f"} Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.409071 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://60ea2d5c51642d661fc0ac3419e0195000e0220cd10f70080639da96696f526f" gracePeriod=30 Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.411504 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5a6901f-6ad2-4a55-979c-5af162a11e87","Type":"ContainerStarted","Data":"b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5"} Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.425434 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.397783936 podStartE2EDuration="6.425417364s" podCreationTimestamp="2026-01-29 11:02:55 +0000 UTC" firstStartedPulling="2026-01-29 11:02:56.562108032 +0000 UTC m=+1273.779439166" lastFinishedPulling="2026-01-29 11:03:00.58974146 +0000 UTC m=+1277.807072594" observedRunningTime="2026-01-29 11:03:01.415561924 +0000 UTC m=+1278.632893058" watchObservedRunningTime="2026-01-29 11:03:01.425417364 +0000 UTC m=+1278.642748498" Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.438153 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.251361531 podStartE2EDuration="6.438131405s" podCreationTimestamp="2026-01-29 11:02:55 +0000 UTC" firstStartedPulling="2026-01-29 11:02:56.398554269 +0000 UTC m=+1273.615885403" lastFinishedPulling="2026-01-29 11:03:00.585324143 +0000 UTC 
m=+1277.802655277" observedRunningTime="2026-01-29 11:03:01.436014433 +0000 UTC m=+1278.653345577" watchObservedRunningTime="2026-01-29 11:03:01.438131405 +0000 UTC m=+1278.655462559" Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.453049 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.658954722 podStartE2EDuration="6.453034568s" podCreationTimestamp="2026-01-29 11:02:55 +0000 UTC" firstStartedPulling="2026-01-29 11:02:56.792438796 +0000 UTC m=+1274.009769930" lastFinishedPulling="2026-01-29 11:03:00.586518642 +0000 UTC m=+1277.803849776" observedRunningTime="2026-01-29 11:03:01.449083412 +0000 UTC m=+1278.666414546" watchObservedRunningTime="2026-01-29 11:03:01.453034568 +0000 UTC m=+1278.670365702" Jan 29 11:03:01 crc kubenswrapper[4852]: I0129 11:03:01.471458 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.728194052 podStartE2EDuration="6.471444478s" podCreationTimestamp="2026-01-29 11:02:55 +0000 UTC" firstStartedPulling="2026-01-29 11:02:56.842150809 +0000 UTC m=+1274.059481943" lastFinishedPulling="2026-01-29 11:03:00.585401235 +0000 UTC m=+1277.802732369" observedRunningTime="2026-01-29 11:03:01.468837114 +0000 UTC m=+1278.686168258" watchObservedRunningTime="2026-01-29 11:03:01.471444478 +0000 UTC m=+1278.688775612" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.287475 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.393073 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-config-data\") pod \"071d024d-c42c-405b-b565-a0cd6e82aa71\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.393163 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgfsw\" (UniqueName: \"kubernetes.io/projected/071d024d-c42c-405b-b565-a0cd6e82aa71-kube-api-access-hgfsw\") pod \"071d024d-c42c-405b-b565-a0cd6e82aa71\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.393189 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071d024d-c42c-405b-b565-a0cd6e82aa71-logs\") pod \"071d024d-c42c-405b-b565-a0cd6e82aa71\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.393232 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-combined-ca-bundle\") pod \"071d024d-c42c-405b-b565-a0cd6e82aa71\" (UID: \"071d024d-c42c-405b-b565-a0cd6e82aa71\") " Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.399185 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/071d024d-c42c-405b-b565-a0cd6e82aa71-logs" (OuterVolumeSpecName: "logs") pod "071d024d-c42c-405b-b565-a0cd6e82aa71" (UID: "071d024d-c42c-405b-b565-a0cd6e82aa71"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.415399 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/071d024d-c42c-405b-b565-a0cd6e82aa71-kube-api-access-hgfsw" (OuterVolumeSpecName: "kube-api-access-hgfsw") pod "071d024d-c42c-405b-b565-a0cd6e82aa71" (UID: "071d024d-c42c-405b-b565-a0cd6e82aa71"). InnerVolumeSpecName "kube-api-access-hgfsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.426933 4852 generic.go:334] "Generic (PLEG): container finished" podID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerID="c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e" exitCode=0 Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.426972 4852 generic.go:334] "Generic (PLEG): container finished" podID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerID="9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0" exitCode=143 Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.428079 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.428774 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"071d024d-c42c-405b-b565-a0cd6e82aa71","Type":"ContainerDied","Data":"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e"} Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.428856 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"071d024d-c42c-405b-b565-a0cd6e82aa71","Type":"ContainerDied","Data":"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0"} Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.428880 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"071d024d-c42c-405b-b565-a0cd6e82aa71","Type":"ContainerDied","Data":"a42953459a1c3742453f9c3d574a10a4df019f776d3ae1434f0004b7359fa7fc"} Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.428926 4852 scope.go:117] "RemoveContainer" containerID="c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.444731 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-config-data" (OuterVolumeSpecName: "config-data") pod "071d024d-c42c-405b-b565-a0cd6e82aa71" (UID: "071d024d-c42c-405b-b565-a0cd6e82aa71"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.449724 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "071d024d-c42c-405b-b565-a0cd6e82aa71" (UID: "071d024d-c42c-405b-b565-a0cd6e82aa71"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.468989 4852 scope.go:117] "RemoveContainer" containerID="9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.495671 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071d024d-c42c-405b-b565-a0cd6e82aa71-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.495708 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.495719 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/071d024d-c42c-405b-b565-a0cd6e82aa71-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.495730 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgfsw\" (UniqueName: \"kubernetes.io/projected/071d024d-c42c-405b-b565-a0cd6e82aa71-kube-api-access-hgfsw\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.500364 4852 scope.go:117] "RemoveContainer" containerID="c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e" Jan 29 11:03:02 crc kubenswrapper[4852]: E0129 11:03:02.501060 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e\": container with ID starting with c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e not found: ID does not exist" containerID="c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.501084 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e"} err="failed to get container status \"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e\": rpc error: code = NotFound desc = could not find container \"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e\": container with ID starting with c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e not found: ID does not exist" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.501101 4852 scope.go:117] "RemoveContainer" containerID="9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0" Jan 29 11:03:02 crc kubenswrapper[4852]: E0129 11:03:02.501433 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0\": container with ID starting with 9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0 not found: ID does not exist" containerID="9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.501467 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0"} err="failed to get container status \"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0\": rpc error: code = NotFound desc = could 
not find container \"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0\": container with ID starting with 9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0 not found: ID does not exist" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.501519 4852 scope.go:117] "RemoveContainer" containerID="c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.501945 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e"} err="failed to get container status \"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e\": rpc error: code = NotFound desc = could not find container \"c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e\": container with ID starting with c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e not found: ID does not exist" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.501967 4852 scope.go:117] "RemoveContainer" containerID="9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.502202 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0"} err="failed to get container status \"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0\": rpc error: code = NotFound desc = could not find container \"9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0\": container with ID starting with 9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0 not found: ID does not exist" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.769412 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.782232 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.792813 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:02 crc kubenswrapper[4852]: E0129 11:03:02.793250 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-log" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.793268 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-log" Jan 29 11:03:02 crc kubenswrapper[4852]: E0129 11:03:02.793313 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-metadata" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.793319 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-metadata" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.793492 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-log" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.793512 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" containerName="nova-metadata-metadata" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.794611 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.801504 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.801952 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.804088 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.903702 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gflgz\" (UniqueName: \"kubernetes.io/projected/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-kube-api-access-gflgz\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.904022 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-config-data\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.904129 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-logs\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.904521 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:02 crc kubenswrapper[4852]: I0129 11:03:02.904857 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.006725 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-config-data\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.006792 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-logs\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.006848 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 
11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.006930 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.007008 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gflgz\" (UniqueName: \"kubernetes.io/projected/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-kube-api-access-gflgz\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.008531 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-logs\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.012234 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-config-data\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.012393 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.019061 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.031338 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gflgz\" (UniqueName: \"kubernetes.io/projected/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-kube-api-access-gflgz\") pod \"nova-metadata-0\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.119575 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.506647 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="071d024d-c42c-405b-b565-a0cd6e82aa71" path="/var/lib/kubelet/pods/071d024d-c42c-405b-b565-a0cd6e82aa71/volumes" Jan 29 11:03:03 crc kubenswrapper[4852]: I0129 11:03:03.584283 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:04 crc kubenswrapper[4852]: I0129 11:03:04.447979 4852 generic.go:334] "Generic (PLEG): container finished" podID="2ce36949-28e5-43d1-ae87-7cf9113cc884" containerID="b5db0dc9d125bb6bd9a82455645892cb37b96d3694ff50709c1a00e7c92e58ab" exitCode=0 Jan 29 11:03:04 crc kubenswrapper[4852]: I0129 11:03:04.448084 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nwllr" event={"ID":"2ce36949-28e5-43d1-ae87-7cf9113cc884","Type":"ContainerDied","Data":"b5db0dc9d125bb6bd9a82455645892cb37b96d3694ff50709c1a00e7c92e58ab"} Jan 29 11:03:04 crc kubenswrapper[4852]: I0129 11:03:04.451045 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da098609-a2a4-4bd0-8f3a-52a9f47cfe07","Type":"ContainerStarted","Data":"312816bc4fa34a94ab28a45822b953085506a1c41a6822adcec69cb7b009660d"} Jan 29 11:03:04 crc kubenswrapper[4852]: I0129 11:03:04.451086 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da098609-a2a4-4bd0-8f3a-52a9f47cfe07","Type":"ContainerStarted","Data":"2f84f2c11a2ecec3e0c0fe0db54616addf33baede66b8b9e71ce016952a120c9"} Jan 29 11:03:04 crc kubenswrapper[4852]: I0129 11:03:04.451097 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da098609-a2a4-4bd0-8f3a-52a9f47cfe07","Type":"ContainerStarted","Data":"05d652ddc5de07c93c3908ba108cce41dccebfd2b26cb6d1741eece714750112"} Jan 29 11:03:04 crc kubenswrapper[4852]: I0129 11:03:04.488490 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.488464571 podStartE2EDuration="2.488464571s" podCreationTimestamp="2026-01-29 11:03:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:04.486008922 +0000 UTC m=+1281.703340066" watchObservedRunningTime="2026-01-29 11:03:04.488464571 +0000 UTC m=+1281.705795705" Jan 29 11:03:05 crc kubenswrapper[4852]: W0129 11:03:05.061918 4852 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a1d369c_e67c_4fa5_897f_38a385e6841b.slice/crio-conmon-f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a1d369c_e67c_4fa5_897f_38a385e6841b.slice/crio-conmon-f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7.scope: no such file or directory Jan 29 11:03:05 crc kubenswrapper[4852]: W0129 11:03:05.061989 4852 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a1d369c_e67c_4fa5_897f_38a385e6841b.slice/crio-f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch 
/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a1d369c_e67c_4fa5_897f_38a385e6841b.slice/crio-f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7.scope: no such file or directory Jan 29 11:03:05 crc kubenswrapper[4852]: W0129 11:03:05.066568 4852 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-conmon-9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-conmon-9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0.scope: no such file or directory Jan 29 11:03:05 crc kubenswrapper[4852]: W0129 11:03:05.067721 4852 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-9d0f75f66e313b49a5ef8b1124457e813b69e98516e090611d01a8f648213df0.scope: no such file or directory Jan 29 11:03:05 crc kubenswrapper[4852]: W0129 11:03:05.071165 4852 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-conmon-c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-conmon-c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e.scope: no such file or directory Jan 29 11:03:05 crc kubenswrapper[4852]: W0129 11:03:05.071753 4852 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-c1de68ea1ba3820952bbfc62285867ee71d1418c0a93f6614f1de92bd35f6a4e.scope: no such file or directory Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.465335 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerID="e6181736252555859f974f1832bd23c419f205a5658446a9de578e5e2bf92917" exitCode=137 Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.477869 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerDied","Data":"e6181736252555859f974f1832bd23c419f205a5658446a9de578e5e2bf92917"} Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.763150 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.763208 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 11:03:05 crc kubenswrapper[4852]: 
I0129 11:03:05.794202 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.837795 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.966865 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-config-data\") pod \"2ce36949-28e5-43d1-ae87-7cf9113cc884\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.967306 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p56wq\" (UniqueName: \"kubernetes.io/projected/2ce36949-28e5-43d1-ae87-7cf9113cc884-kube-api-access-p56wq\") pod \"2ce36949-28e5-43d1-ae87-7cf9113cc884\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.967416 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-combined-ca-bundle\") pod \"2ce36949-28e5-43d1-ae87-7cf9113cc884\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.967453 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-scripts\") pod \"2ce36949-28e5-43d1-ae87-7cf9113cc884\" (UID: \"2ce36949-28e5-43d1-ae87-7cf9113cc884\") " Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.972502 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ce36949-28e5-43d1-ae87-7cf9113cc884-kube-api-access-p56wq" (OuterVolumeSpecName: "kube-api-access-p56wq") pod "2ce36949-28e5-43d1-ae87-7cf9113cc884" (UID: "2ce36949-28e5-43d1-ae87-7cf9113cc884"). InnerVolumeSpecName "kube-api-access-p56wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:05 crc kubenswrapper[4852]: I0129 11:03:05.972558 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-scripts" (OuterVolumeSpecName: "scripts") pod "2ce36949-28e5-43d1-ae87-7cf9113cc884" (UID: "2ce36949-28e5-43d1-ae87-7cf9113cc884"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:05.999042 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-config-data" (OuterVolumeSpecName: "config-data") pod "2ce36949-28e5-43d1-ae87-7cf9113cc884" (UID: "2ce36949-28e5-43d1-ae87-7cf9113cc884"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:05.999520 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ce36949-28e5-43d1-ae87-7cf9113cc884" (UID: "2ce36949-28e5-43d1-ae87-7cf9113cc884"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.016551 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.016669 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069184 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-log-httpd\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069231 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrm4c\" (UniqueName: \"kubernetes.io/projected/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-kube-api-access-lrm4c\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069332 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-sg-core-conf-yaml\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069353 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-scripts\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069412 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-run-httpd\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069491 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-combined-ca-bundle\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.069544 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-config-data\") pod \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\" (UID: \"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456\") " Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.070304 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.070323 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p56wq\" (UniqueName: \"kubernetes.io/projected/2ce36949-28e5-43d1-ae87-7cf9113cc884-kube-api-access-p56wq\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.070334 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.070346 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ce36949-28e5-43d1-ae87-7cf9113cc884-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.084228 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.085341 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.085800 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-scripts" (OuterVolumeSpecName: "scripts") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.087018 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.088993 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-kube-api-access-lrm4c" (OuterVolumeSpecName: "kube-api-access-lrm4c") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "kube-api-access-lrm4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.099381 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-fb2bm"] Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.099838 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerName="dnsmasq-dns" containerID="cri-o://6b28eeca95cbe08e4a5ad1c4feaf4c2345179dcc8ef8abd167dfda1f5b0122b0" gracePeriod=10 Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.100396 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.100459 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.126093 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.172531 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.172560 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrm4c\" (UniqueName: \"kubernetes.io/projected/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-kube-api-access-lrm4c\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.172570 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.172591 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.172601 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.203202 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.247842 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-config-data" (OuterVolumeSpecName: "config-data") pod "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" (UID: "b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.274677 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.274720 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.478325 4852 generic.go:334] "Generic (PLEG): container finished" podID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerID="6b28eeca95cbe08e4a5ad1c4feaf4c2345179dcc8ef8abd167dfda1f5b0122b0" exitCode=0 Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.478391 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" event={"ID":"7425bd14-f51f-408c-8fa0-749ce9aa74c7","Type":"ContainerDied","Data":"6b28eeca95cbe08e4a5ad1c4feaf4c2345179dcc8ef8abd167dfda1f5b0122b0"} Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.482931 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nwllr" event={"ID":"2ce36949-28e5-43d1-ae87-7cf9113cc884","Type":"ContainerDied","Data":"fef5b0bf9b014fe63c2f9c8a62c1d8c4bb9e45522915a5d3e8f9465a5eb3251e"} Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.482969 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fef5b0bf9b014fe63c2f9c8a62c1d8c4bb9e45522915a5d3e8f9465a5eb3251e" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.483022 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nwllr" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.490251 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.492805 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456","Type":"ContainerDied","Data":"9af2692d586948c7ed36b367478bd72443225d65845f3596353ac8bf57080566"} Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.492873 4852 scope.go:117] "RemoveContainer" containerID="e6181736252555859f974f1832bd23c419f205a5658446a9de578e5e2bf92917" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.527263 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.529319 4852 scope.go:117] "RemoveContainer" containerID="6ee122dee8f0a7226fd55a5feef48ff0fe98dc60f8c10f7012db4ecc52018411" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.569666 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.572429 4852 scope.go:117] "RemoveContainer" containerID="bea28cac6b094c8ec53ab97bca35cb15116452dad13ef07cdeeda51c512d43aa" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.583092 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.612638 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:06 crc kubenswrapper[4852]: E0129 11:03:06.613096 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-central-agent" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613119 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-central-agent" Jan 29 11:03:06 crc kubenswrapper[4852]: E0129 11:03:06.613131 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-notification-agent" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613137 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-notification-agent" Jan 29 11:03:06 crc kubenswrapper[4852]: E0129 11:03:06.613156 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="proxy-httpd" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613162 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="proxy-httpd" Jan 29 11:03:06 crc kubenswrapper[4852]: E0129 11:03:06.613176 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ce36949-28e5-43d1-ae87-7cf9113cc884" containerName="nova-manage" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613182 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ce36949-28e5-43d1-ae87-7cf9113cc884" containerName="nova-manage" Jan 29 11:03:06 crc kubenswrapper[4852]: E0129 11:03:06.613193 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="sg-core" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613200 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="sg-core" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 
11:03:06.613382 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="sg-core" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613401 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-central-agent" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613419 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ce36949-28e5-43d1-ae87-7cf9113cc884" containerName="nova-manage" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613432 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="ceilometer-notification-agent" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.613444 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" containerName="proxy-httpd" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.615118 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.620530 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.620738 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.633768 4852 scope.go:117] "RemoveContainer" containerID="5f8fce3bcf739f9e07dc49117c782949efc448a9336705688d06b41810eb0c29" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.657658 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.681931 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-run-httpd\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.682058 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-config-data\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.682096 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-scripts\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.682118 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.682136 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-sg-core-conf-yaml\") 
pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.682153 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-log-httpd\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.682202 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xljn\" (UniqueName: \"kubernetes.io/projected/3affc64f-bcd8-4008-b2cf-23295a80e0cb-kube-api-access-6xljn\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.690367 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.690684 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-log" containerID="cri-o://c745dfab9a29fbe2967f87a7ce5cec429071fe93027eb998fd737fe8ea427801" gracePeriod=30 Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.691148 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-api" containerID="cri-o://e238f6e4fb8be6b2a6e4c0cdf1fbdffd1010b03584ce000320137c375372d096" gracePeriod=30 Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.698694 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": EOF" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.698869 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": EOF" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.708083 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.708345 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-log" containerID="cri-o://2f84f2c11a2ecec3e0c0fe0db54616addf33baede66b8b9e71ce016952a120c9" gracePeriod=30 Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.708762 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-metadata" containerID="cri-o://312816bc4fa34a94ab28a45822b953085506a1c41a6822adcec69cb7b009660d" gracePeriod=30 Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.783770 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-run-httpd\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.783898 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-config-data\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.783937 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-scripts\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.783959 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.783976 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.783991 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-log-httpd\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.784193 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xljn\" (UniqueName: \"kubernetes.io/projected/3affc64f-bcd8-4008-b2cf-23295a80e0cb-kube-api-access-6xljn\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.784760 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-run-httpd\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.784804 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-log-httpd\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.788930 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-scripts\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.789495 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-config-data\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.795380 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.804658 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.809428 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xljn\" (UniqueName: \"kubernetes.io/projected/3affc64f-bcd8-4008-b2cf-23295a80e0cb-kube-api-access-6xljn\") pod \"ceilometer-0\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " pod="openstack/ceilometer-0" Jan 29 11:03:06 crc kubenswrapper[4852]: I0129 11:03:06.974595 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.116304 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.173167 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.292364 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-svc\") pod \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.292483 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-swift-storage-0\") pod \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.292575 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-nb\") pod \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.292669 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwx2f\" (UniqueName: \"kubernetes.io/projected/7425bd14-f51f-408c-8fa0-749ce9aa74c7-kube-api-access-zwx2f\") pod \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.292740 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-config\") pod \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\" (UID: \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.292762 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-sb\") pod \"7425bd14-f51f-408c-8fa0-749ce9aa74c7\" (UID: 
\"7425bd14-f51f-408c-8fa0-749ce9aa74c7\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.310887 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7425bd14-f51f-408c-8fa0-749ce9aa74c7-kube-api-access-zwx2f" (OuterVolumeSpecName: "kube-api-access-zwx2f") pod "7425bd14-f51f-408c-8fa0-749ce9aa74c7" (UID: "7425bd14-f51f-408c-8fa0-749ce9aa74c7"). InnerVolumeSpecName "kube-api-access-zwx2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.373472 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-config" (OuterVolumeSpecName: "config") pod "7425bd14-f51f-408c-8fa0-749ce9aa74c7" (UID: "7425bd14-f51f-408c-8fa0-749ce9aa74c7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.377673 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7425bd14-f51f-408c-8fa0-749ce9aa74c7" (UID: "7425bd14-f51f-408c-8fa0-749ce9aa74c7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.384929 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7425bd14-f51f-408c-8fa0-749ce9aa74c7" (UID: "7425bd14-f51f-408c-8fa0-749ce9aa74c7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.396276 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7425bd14-f51f-408c-8fa0-749ce9aa74c7" (UID: "7425bd14-f51f-408c-8fa0-749ce9aa74c7"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.404774 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.404806 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwx2f\" (UniqueName: \"kubernetes.io/projected/7425bd14-f51f-408c-8fa0-749ce9aa74c7-kube-api-access-zwx2f\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.404816 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.404825 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.404833 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.426069 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7425bd14-f51f-408c-8fa0-749ce9aa74c7" (UID: "7425bd14-f51f-408c-8fa0-749ce9aa74c7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.476558 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456" path="/var/lib/kubelet/pods/b8af8e94-f9a1-4f5a-ba3a-0916ff9f0456/volumes" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.507676 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7425bd14-f51f-408c-8fa0-749ce9aa74c7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.508847 4852 generic.go:334] "Generic (PLEG): container finished" podID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerID="312816bc4fa34a94ab28a45822b953085506a1c41a6822adcec69cb7b009660d" exitCode=0 Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.508877 4852 generic.go:334] "Generic (PLEG): container finished" podID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerID="2f84f2c11a2ecec3e0c0fe0db54616addf33baede66b8b9e71ce016952a120c9" exitCode=143 Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.508929 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da098609-a2a4-4bd0-8f3a-52a9f47cfe07","Type":"ContainerDied","Data":"312816bc4fa34a94ab28a45822b953085506a1c41a6822adcec69cb7b009660d"} Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.508959 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da098609-a2a4-4bd0-8f3a-52a9f47cfe07","Type":"ContainerDied","Data":"2f84f2c11a2ecec3e0c0fe0db54616addf33baede66b8b9e71ce016952a120c9"} Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.508971 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da098609-a2a4-4bd0-8f3a-52a9f47cfe07","Type":"ContainerDied","Data":"05d652ddc5de07c93c3908ba108cce41dccebfd2b26cb6d1741eece714750112"} Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.508981 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05d652ddc5de07c93c3908ba108cce41dccebfd2b26cb6d1741eece714750112" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.516885 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.520325 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" event={"ID":"7425bd14-f51f-408c-8fa0-749ce9aa74c7","Type":"ContainerDied","Data":"d70f315c3fb8e255c5172504d9903fa86c29aaa900f550623954272bafb1ca81"} Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.520405 4852 scope.go:117] "RemoveContainer" containerID="6b28eeca95cbe08e4a5ad1c4feaf4c2345179dcc8ef8abd167dfda1f5b0122b0" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.520568 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-fb2bm" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.547856 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerID="c745dfab9a29fbe2967f87a7ce5cec429071fe93027eb998fd737fe8ea427801" exitCode=143 Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.548573 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7","Type":"ContainerDied","Data":"c745dfab9a29fbe2967f87a7ce5cec429071fe93027eb998fd737fe8ea427801"} Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.552312 4852 scope.go:117] "RemoveContainer" containerID="e4bd4060c81becf02dfdec18bec7bb87edd2e53e73362e58cbe011ad2505c1cd" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.570422 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-fb2bm"] Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.589355 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-fb2bm"] Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.608420 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gflgz\" (UniqueName: \"kubernetes.io/projected/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-kube-api-access-gflgz\") pod \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.608507 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-config-data\") pod \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.608533 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-nova-metadata-tls-certs\") pod \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.608774 4852 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-combined-ca-bundle\") pod \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.608811 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-logs\") pod \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\" (UID: \"da098609-a2a4-4bd0-8f3a-52a9f47cfe07\") " Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.615815 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-logs" (OuterVolumeSpecName: "logs") pod "da098609-a2a4-4bd0-8f3a-52a9f47cfe07" (UID: "da098609-a2a4-4bd0-8f3a-52a9f47cfe07"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.619776 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-kube-api-access-gflgz" (OuterVolumeSpecName: "kube-api-access-gflgz") pod "da098609-a2a4-4bd0-8f3a-52a9f47cfe07" (UID: "da098609-a2a4-4bd0-8f3a-52a9f47cfe07"). InnerVolumeSpecName "kube-api-access-gflgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.643846 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da098609-a2a4-4bd0-8f3a-52a9f47cfe07" (UID: "da098609-a2a4-4bd0-8f3a-52a9f47cfe07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.651448 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-config-data" (OuterVolumeSpecName: "config-data") pod "da098609-a2a4-4bd0-8f3a-52a9f47cfe07" (UID: "da098609-a2a4-4bd0-8f3a-52a9f47cfe07"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.682790 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "da098609-a2a4-4bd0-8f3a-52a9f47cfe07" (UID: "da098609-a2a4-4bd0-8f3a-52a9f47cfe07"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.694812 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.710604 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gflgz\" (UniqueName: \"kubernetes.io/projected/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-kube-api-access-gflgz\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.710646 4852 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.710661 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.710672 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:07 crc kubenswrapper[4852]: I0129 11:03:07.710683 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da098609-a2a4-4bd0-8f3a-52a9f47cfe07-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.568780 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerStarted","Data":"adcf96c63d812892e3e7bd74dd758104f3e6453497e0e5d1e154ef7b704985b1"} Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.571302 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.571377 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d5a6901f-6ad2-4a55-979c-5af162a11e87" containerName="nova-scheduler-scheduler" containerID="cri-o://b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" gracePeriod=30 Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.610452 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.627980 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.647283 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:08 crc kubenswrapper[4852]: E0129 11:03:08.647703 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerName="dnsmasq-dns" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.647721 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerName="dnsmasq-dns" Jan 29 11:03:08 crc kubenswrapper[4852]: E0129 11:03:08.647752 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-metadata" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.647760 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-metadata" Jan 29 11:03:08 crc kubenswrapper[4852]: E0129 11:03:08.647783 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-log" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.647789 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-log" Jan 29 11:03:08 crc kubenswrapper[4852]: E0129 11:03:08.647800 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerName="init" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.647808 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerName="init" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.648007 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" containerName="dnsmasq-dns" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.648030 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-log" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.648039 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" containerName="nova-metadata-metadata" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.649032 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.655215 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.657566 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.657761 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.730070 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.730151 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rh9d5\" (UniqueName: \"kubernetes.io/projected/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-kube-api-access-rh9d5\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.730425 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-config-data\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.730553 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.730603 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-logs\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.832344 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.832445 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rh9d5\" (UniqueName: \"kubernetes.io/projected/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-kube-api-access-rh9d5\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.832520 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-config-data\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " 
pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.832572 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.832619 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-logs\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.833111 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-logs\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.838558 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.839103 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.839298 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-config-data\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.853989 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rh9d5\" (UniqueName: \"kubernetes.io/projected/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-kube-api-access-rh9d5\") pod \"nova-metadata-0\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " pod="openstack/nova-metadata-0" Jan 29 11:03:08 crc kubenswrapper[4852]: I0129 11:03:08.978843 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:09 crc kubenswrapper[4852]: I0129 11:03:09.474986 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7425bd14-f51f-408c-8fa0-749ce9aa74c7" path="/var/lib/kubelet/pods/7425bd14-f51f-408c-8fa0-749ce9aa74c7/volumes" Jan 29 11:03:09 crc kubenswrapper[4852]: I0129 11:03:09.476099 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da098609-a2a4-4bd0-8f3a-52a9f47cfe07" path="/var/lib/kubelet/pods/da098609-a2a4-4bd0-8f3a-52a9f47cfe07/volumes" Jan 29 11:03:09 crc kubenswrapper[4852]: I0129 11:03:09.476656 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:09 crc kubenswrapper[4852]: I0129 11:03:09.583868 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerStarted","Data":"882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c"} Jan 29 11:03:09 crc kubenswrapper[4852]: I0129 11:03:09.586212 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae11b17a-4e83-4f95-8390-5b0d1159fd6d","Type":"ContainerStarted","Data":"3513c2a7edef22223380bac870f6126b6e9b4488db6706e526794b832eb2f584"} Jan 29 11:03:10 crc kubenswrapper[4852]: I0129 11:03:10.597187 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae11b17a-4e83-4f95-8390-5b0d1159fd6d","Type":"ContainerStarted","Data":"4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4"} Jan 29 11:03:10 crc kubenswrapper[4852]: I0129 11:03:10.597777 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae11b17a-4e83-4f95-8390-5b0d1159fd6d","Type":"ContainerStarted","Data":"ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713"} Jan 29 11:03:10 crc kubenswrapper[4852]: I0129 11:03:10.618063 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.61804089 podStartE2EDuration="2.61804089s" podCreationTimestamp="2026-01-29 11:03:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:10.612903085 +0000 UTC m=+1287.830234249" watchObservedRunningTime="2026-01-29 11:03:10.61804089 +0000 UTC m=+1287.835372024" Jan 29 11:03:10 crc kubenswrapper[4852]: E0129 11:03:10.765272 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:03:10 crc kubenswrapper[4852]: E0129 11:03:10.766802 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:03:10 crc kubenswrapper[4852]: E0129 11:03:10.768347 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:03:10 crc kubenswrapper[4852]: E0129 11:03:10.768386 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d5a6901f-6ad2-4a55-979c-5af162a11e87" containerName="nova-scheduler-scheduler" Jan 29 11:03:11 crc kubenswrapper[4852]: I0129 11:03:11.608740 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerStarted","Data":"a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143"} Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.656277 4852 generic.go:334] "Generic (PLEG): container finished" podID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerID="e238f6e4fb8be6b2a6e4c0cdf1fbdffd1010b03584ce000320137c375372d096" exitCode=0 Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.656882 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7","Type":"ContainerDied","Data":"e238f6e4fb8be6b2a6e4c0cdf1fbdffd1010b03584ce000320137c375372d096"} Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.656915 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7","Type":"ContainerDied","Data":"3f5f9df82850faa8f5e824b99dfb497a37bf276ca642065b32f694b424bfbfd3"} Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.656939 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f5f9df82850faa8f5e824b99dfb497a37bf276ca642065b32f694b424bfbfd3" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.697947 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.813555 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqzsx\" (UniqueName: \"kubernetes.io/projected/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-kube-api-access-kqzsx\") pod \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.814257 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-logs\") pod \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.814370 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-combined-ca-bundle\") pod \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.814536 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-config-data\") pod \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\" (UID: \"4ca7b700-a8ce-44ad-8838-67e14bb0d5a7\") " Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.815704 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-logs" (OuterVolumeSpecName: "logs") pod "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" (UID: "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.825376 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-kube-api-access-kqzsx" (OuterVolumeSpecName: "kube-api-access-kqzsx") pod "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" (UID: "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7"). InnerVolumeSpecName "kube-api-access-kqzsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.846214 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-config-data" (OuterVolumeSpecName: "config-data") pod "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" (UID: "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.847635 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" (UID: "4ca7b700-a8ce-44ad-8838-67e14bb0d5a7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.916870 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqzsx\" (UniqueName: \"kubernetes.io/projected/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-kube-api-access-kqzsx\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.916907 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.916919 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:12 crc kubenswrapper[4852]: I0129 11:03:12.916928 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.436747 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.529285 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh9wc\" (UniqueName: \"kubernetes.io/projected/d5a6901f-6ad2-4a55-979c-5af162a11e87-kube-api-access-vh9wc\") pod \"d5a6901f-6ad2-4a55-979c-5af162a11e87\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.529360 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-config-data\") pod \"d5a6901f-6ad2-4a55-979c-5af162a11e87\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.529407 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-combined-ca-bundle\") pod \"d5a6901f-6ad2-4a55-979c-5af162a11e87\" (UID: \"d5a6901f-6ad2-4a55-979c-5af162a11e87\") " Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.537837 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5a6901f-6ad2-4a55-979c-5af162a11e87-kube-api-access-vh9wc" (OuterVolumeSpecName: "kube-api-access-vh9wc") pod "d5a6901f-6ad2-4a55-979c-5af162a11e87" (UID: "d5a6901f-6ad2-4a55-979c-5af162a11e87"). InnerVolumeSpecName "kube-api-access-vh9wc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.561663 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-config-data" (OuterVolumeSpecName: "config-data") pod "d5a6901f-6ad2-4a55-979c-5af162a11e87" (UID: "d5a6901f-6ad2-4a55-979c-5af162a11e87"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.607425 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5a6901f-6ad2-4a55-979c-5af162a11e87" (UID: "d5a6901f-6ad2-4a55-979c-5af162a11e87"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.631343 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh9wc\" (UniqueName: \"kubernetes.io/projected/d5a6901f-6ad2-4a55-979c-5af162a11e87-kube-api-access-vh9wc\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.631386 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.631400 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a6901f-6ad2-4a55-979c-5af162a11e87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.667008 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerStarted","Data":"a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0"} Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.668646 4852 generic.go:334] "Generic (PLEG): container finished" podID="d5a6901f-6ad2-4a55-979c-5af162a11e87" containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" exitCode=0 Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.668737 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.668769 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5a6901f-6ad2-4a55-979c-5af162a11e87","Type":"ContainerDied","Data":"b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5"} Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.668799 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d5a6901f-6ad2-4a55-979c-5af162a11e87","Type":"ContainerDied","Data":"10bfd56d573936878281c8e99381c7a3e9e571563e9844ba8d43c6c06a85cf19"} Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.668819 4852 scope.go:117] "RemoveContainer" containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.668842 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.694496 4852 scope.go:117] "RemoveContainer" containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" Jan 29 11:03:13 crc kubenswrapper[4852]: E0129 11:03:13.694920 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5\": container with ID starting with b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5 not found: ID does not exist" containerID="b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.694959 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5"} err="failed to get container status \"b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5\": rpc error: code = NotFound desc = could not find container \"b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5\": container with ID starting with b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5 not found: ID does not exist" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.697556 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.707713 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.716157 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.725545 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: E0129 11:03:13.726088 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-log" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.726119 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-log" Jan 29 11:03:13 crc kubenswrapper[4852]: E0129 11:03:13.726155 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-api" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.726162 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-api" Jan 29 11:03:13 crc kubenswrapper[4852]: E0129 11:03:13.726190 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5a6901f-6ad2-4a55-979c-5af162a11e87" containerName="nova-scheduler-scheduler" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.726198 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5a6901f-6ad2-4a55-979c-5af162a11e87" containerName="nova-scheduler-scheduler" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.726408 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-api" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.726427 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5a6901f-6ad2-4a55-979c-5af162a11e87" containerName="nova-scheduler-scheduler" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.726443 
4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" containerName="nova-api-log" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.727797 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.730502 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.733069 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-config-data\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.733158 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.733230 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdnhf\" (UniqueName: \"kubernetes.io/projected/7665c83e-9be1-4a56-a2e1-2c59331b652f-kube-api-access-fdnhf\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.733303 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7665c83e-9be1-4a56-a2e1-2c59331b652f-logs\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.742882 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.751412 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.753032 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.766751 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.769401 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.772752 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.834773 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7665c83e-9be1-4a56-a2e1-2c59331b652f-logs\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.834871 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-config-data\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.834901 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-config-data\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.834941 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.834990 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.835015 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdnhf\" (UniqueName: \"kubernetes.io/projected/7665c83e-9be1-4a56-a2e1-2c59331b652f-kube-api-access-fdnhf\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.835042 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k96vz\" (UniqueName: \"kubernetes.io/projected/2738152f-742e-46f9-b7b7-3db29a88cf62-kube-api-access-k96vz\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.835180 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7665c83e-9be1-4a56-a2e1-2c59331b652f-logs\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.839390 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-config-data\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.850404 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdnhf\" (UniqueName: \"kubernetes.io/projected/7665c83e-9be1-4a56-a2e1-2c59331b652f-kube-api-access-fdnhf\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.858437 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " pod="openstack/nova-api-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.937142 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.937626 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k96vz\" (UniqueName: \"kubernetes.io/projected/2738152f-742e-46f9-b7b7-3db29a88cf62-kube-api-access-k96vz\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.937839 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-config-data\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.941534 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-config-data\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.944182 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.962863 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k96vz\" (UniqueName: \"kubernetes.io/projected/2738152f-742e-46f9-b7b7-3db29a88cf62-kube-api-access-k96vz\") pod \"nova-scheduler-0\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.979314 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 11:03:13 crc kubenswrapper[4852]: I0129 11:03:13.979592 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 11:03:14 crc kubenswrapper[4852]: I0129 11:03:14.046741 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:14 crc kubenswrapper[4852]: I0129 11:03:14.094074 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:14 crc kubenswrapper[4852]: I0129 11:03:14.558172 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:14 crc kubenswrapper[4852]: I0129 11:03:14.685246 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7665c83e-9be1-4a56-a2e1-2c59331b652f","Type":"ContainerStarted","Data":"14e4a52e11f93d528cbbb25f4b5bab005f0940b9dbad46ca22a566abc5f030fd"} Jan 29 11:03:14 crc kubenswrapper[4852]: I0129 11:03:14.700480 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.477238 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ca7b700-a8ce-44ad-8838-67e14bb0d5a7" path="/var/lib/kubelet/pods/4ca7b700-a8ce-44ad-8838-67e14bb0d5a7/volumes" Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.478352 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5a6901f-6ad2-4a55-979c-5af162a11e87" path="/var/lib/kubelet/pods/d5a6901f-6ad2-4a55-979c-5af162a11e87/volumes" Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.706528 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerStarted","Data":"d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b"} Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.706641 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.708880 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2738152f-742e-46f9-b7b7-3db29a88cf62","Type":"ContainerStarted","Data":"c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730"} Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.708929 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2738152f-742e-46f9-b7b7-3db29a88cf62","Type":"ContainerStarted","Data":"3bbbb669b115d02a9595879f93fcabbad2a69fb79743bfc3cf646bdf1806a47c"} Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.712101 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7665c83e-9be1-4a56-a2e1-2c59331b652f","Type":"ContainerStarted","Data":"367985753e381bc0be936084bb20a7a3755257f94acf3f6ddc8317601dccac3f"} Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.712141 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7665c83e-9be1-4a56-a2e1-2c59331b652f","Type":"ContainerStarted","Data":"8d1ac7cfdca310e6fe9133057bb96db75284747694dd112000660faaa660faa5"} Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.734576 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.613095298 podStartE2EDuration="9.734558335s" podCreationTimestamp="2026-01-29 11:03:06 +0000 UTC" firstStartedPulling="2026-01-29 11:03:07.699966723 +0000 UTC m=+1284.917297857" lastFinishedPulling="2026-01-29 11:03:14.82142976 +0000 UTC m=+1292.038760894" observedRunningTime="2026-01-29 11:03:15.728104748 +0000 UTC m=+1292.945435922" watchObservedRunningTime="2026-01-29 
11:03:15.734558335 +0000 UTC m=+1292.951889469" Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.746870 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.7468517649999997 podStartE2EDuration="2.746851765s" podCreationTimestamp="2026-01-29 11:03:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:15.746766083 +0000 UTC m=+1292.964097227" watchObservedRunningTime="2026-01-29 11:03:15.746851765 +0000 UTC m=+1292.964182899" Jan 29 11:03:15 crc kubenswrapper[4852]: I0129 11:03:15.767567 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.76751641 podStartE2EDuration="2.76751641s" podCreationTimestamp="2026-01-29 11:03:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:15.764940857 +0000 UTC m=+1292.982272011" watchObservedRunningTime="2026-01-29 11:03:15.76751641 +0000 UTC m=+1292.984847544" Jan 29 11:03:18 crc kubenswrapper[4852]: I0129 11:03:18.980298 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 11:03:18 crc kubenswrapper[4852]: I0129 11:03:18.980696 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 11:03:19 crc kubenswrapper[4852]: I0129 11:03:19.094490 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 11:03:19 crc kubenswrapper[4852]: I0129 11:03:19.993834 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 11:03:19 crc kubenswrapper[4852]: I0129 11:03:19.993851 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 11:03:20 crc kubenswrapper[4852]: I0129 11:03:20.759632 4852 generic.go:334] "Generic (PLEG): container finished" podID="09e692ef-fb62-44d3-8b88-09fa15eaae6f" containerID="997dcd23b056aaad9a977198f37d86b72e4af2e2a523b009c2a3efd396195c6c" exitCode=0 Jan 29 11:03:20 crc kubenswrapper[4852]: I0129 11:03:20.759848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" event={"ID":"09e692ef-fb62-44d3-8b88-09fa15eaae6f","Type":"ContainerDied","Data":"997dcd23b056aaad9a977198f37d86b72e4af2e2a523b009c2a3efd396195c6c"} Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.084174 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.196465 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-config-data\") pod \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.196599 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g5vf\" (UniqueName: \"kubernetes.io/projected/09e692ef-fb62-44d3-8b88-09fa15eaae6f-kube-api-access-9g5vf\") pod \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.196689 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-scripts\") pod \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.196792 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-combined-ca-bundle\") pod \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\" (UID: \"09e692ef-fb62-44d3-8b88-09fa15eaae6f\") " Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.202773 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09e692ef-fb62-44d3-8b88-09fa15eaae6f-kube-api-access-9g5vf" (OuterVolumeSpecName: "kube-api-access-9g5vf") pod "09e692ef-fb62-44d3-8b88-09fa15eaae6f" (UID: "09e692ef-fb62-44d3-8b88-09fa15eaae6f"). InnerVolumeSpecName "kube-api-access-9g5vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.208893 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-scripts" (OuterVolumeSpecName: "scripts") pod "09e692ef-fb62-44d3-8b88-09fa15eaae6f" (UID: "09e692ef-fb62-44d3-8b88-09fa15eaae6f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.225451 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09e692ef-fb62-44d3-8b88-09fa15eaae6f" (UID: "09e692ef-fb62-44d3-8b88-09fa15eaae6f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.229879 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-config-data" (OuterVolumeSpecName: "config-data") pod "09e692ef-fb62-44d3-8b88-09fa15eaae6f" (UID: "09e692ef-fb62-44d3-8b88-09fa15eaae6f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.299044 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.299081 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.299094 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e692ef-fb62-44d3-8b88-09fa15eaae6f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.299103 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g5vf\" (UniqueName: \"kubernetes.io/projected/09e692ef-fb62-44d3-8b88-09fa15eaae6f-kube-api-access-9g5vf\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.788697 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" event={"ID":"09e692ef-fb62-44d3-8b88-09fa15eaae6f","Type":"ContainerDied","Data":"f3bc4cc541657d39e1be2dda2a169324c0209d0c97a1b923dcf6c81a58024369"} Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.788735 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3bc4cc541657d39e1be2dda2a169324c0209d0c97a1b923dcf6c81a58024369" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.788766 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-nt5bd" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.860190 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 11:03:22 crc kubenswrapper[4852]: E0129 11:03:22.860692 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e692ef-fb62-44d3-8b88-09fa15eaae6f" containerName="nova-cell1-conductor-db-sync" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.860717 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e692ef-fb62-44d3-8b88-09fa15eaae6f" containerName="nova-cell1-conductor-db-sync" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.860985 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="09e692ef-fb62-44d3-8b88-09fa15eaae6f" containerName="nova-cell1-conductor-db-sync" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.861788 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.870331 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 11:03:22 crc kubenswrapper[4852]: I0129 11:03:22.874799 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.012087 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vxbj\" (UniqueName: \"kubernetes.io/projected/e113c351-f17d-477e-b671-0510cd03c0b0-kube-api-access-5vxbj\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.012233 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.012268 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.113932 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vxbj\" (UniqueName: \"kubernetes.io/projected/e113c351-f17d-477e-b671-0510cd03c0b0-kube-api-access-5vxbj\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.114418 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.114463 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.118613 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.130206 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.134288 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vxbj\" (UniqueName: \"kubernetes.io/projected/e113c351-f17d-477e-b671-0510cd03c0b0-kube-api-access-5vxbj\") pod \"nova-cell1-conductor-0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.180717 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.660224 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 11:03:23 crc kubenswrapper[4852]: I0129 11:03:23.800226 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e113c351-f17d-477e-b671-0510cd03c0b0","Type":"ContainerStarted","Data":"e295b6bce5ff0853f56668fbfbc3e69104a3ede022de473e48d93def166b5abb"} Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.047505 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.047831 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.094478 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.149506 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.811795 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e113c351-f17d-477e-b671-0510cd03c0b0","Type":"ContainerStarted","Data":"ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3"} Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.811869 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.837499 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.83746841 podStartE2EDuration="2.83746841s" podCreationTimestamp="2026-01-29 11:03:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:24.826512673 +0000 UTC m=+1302.043843807" watchObservedRunningTime="2026-01-29 11:03:24.83746841 +0000 UTC m=+1302.054799554" Jan 29 11:03:24 crc kubenswrapper[4852]: I0129 11:03:24.843258 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 11:03:25 crc kubenswrapper[4852]: I0129 11:03:25.129887 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 11:03:25 crc kubenswrapper[4852]: I0129 11:03:25.129968 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while 
awaiting headers)" Jan 29 11:03:28 crc kubenswrapper[4852]: I0129 11:03:28.985250 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 11:03:28 crc kubenswrapper[4852]: I0129 11:03:28.993067 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 11:03:28 crc kubenswrapper[4852]: I0129 11:03:28.994082 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 11:03:29 crc kubenswrapper[4852]: I0129 11:03:29.867682 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 11:03:30 crc kubenswrapper[4852]: I0129 11:03:30.017442 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:03:30 crc kubenswrapper[4852]: I0129 11:03:30.017527 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:03:30 crc kubenswrapper[4852]: E0129 11:03:30.792514 4852 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/92fd50bec620ac810d46ebd1830daef450c8d12decc54ebb7c98e25a91599142/diff" to get inode usage: stat /var/lib/containers/storage/overlay/92fd50bec620ac810d46ebd1830daef450c8d12decc54ebb7c98e25a91599142/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_dnsmasq-dns-6bb4fc677f-fb2bm_7425bd14-f51f-408c-8fa0-749ce9aa74c7/dnsmasq-dns/0.log" to get inode usage: stat /var/log/pods/openstack_dnsmasq-dns-6bb4fc677f-fb2bm_7425bd14-f51f-408c-8fa0-749ce9aa74c7/dnsmasq-dns/0.log: no such file or directory Jan 29 11:03:31 crc kubenswrapper[4852]: W0129 11:03:31.468819 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5a6901f_6ad2_4a55_979c_5af162a11e87.slice/crio-b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5.scope WatchSource:0}: Error finding container b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5: Status 404 returned error can't find the container with id b7c7092f1da8b0b7ce6d9c9e09dca5c2aaaba30c97743ac5fb1c95ef07d0cfd5 Jan 29 11:03:31 crc kubenswrapper[4852]: E0129 11:03:31.680639 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ce36949_28e5_43d1_ae87_7cf9113cc884.slice/crio-b5db0dc9d125bb6bd9a82455645892cb37b96d3694ff50709c1a00e7c92e58ab.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5a6901f_6ad2_4a55_979c_5af162a11e87.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8af8e94_f9a1_4f5a_ba3a_0916ff9f0456.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice/crio-a42953459a1c3742453f9c3d574a10a4df019f776d3ae1434f0004b7359fa7fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7425bd14_f51f_408c_8fa0_749ce9aa74c7.slice/crio-d70f315c3fb8e255c5172504d9903fa86c29aaa900f550623954272bafb1ca81\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ca7b700_a8ce_44ad_8838_67e14bb0d5a7.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5a6901f_6ad2_4a55_979c_5af162a11e87.slice/crio-10bfd56d573936878281c8e99381c7a3e9e571563e9844ba8d43c6c06a85cf19\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8af8e94_f9a1_4f5a_ba3a_0916ff9f0456.slice/crio-9af2692d586948c7ed36b367478bd72443225d65845f3596353ac8bf57080566\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071d024d_c42c_405b_b565_a0cd6e82aa71.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7425bd14_f51f_408c_8fa0_749ce9aa74c7.slice/crio-conmon-6b28eeca95cbe08e4a5ad1c4feaf4c2345179dcc8ef8abd167dfda1f5b0122b0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ce36949_28e5_43d1_ae87_7cf9113cc884.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ce36949_28e5_43d1_ae87_7cf9113cc884.slice/crio-conmon-b5db0dc9d125bb6bd9a82455645892cb37b96d3694ff50709c1a00e7c92e58ab.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e692ef_fb62_44d3_8b88_09fa15eaae6f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7425bd14_f51f_408c_8fa0_749ce9aa74c7.slice\": RecentStats: unable to find data in memory cache]" Jan 29 11:03:31 crc kubenswrapper[4852]: I0129 11:03:31.897214 4852 generic.go:334] "Generic (PLEG): container finished" podID="e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" containerID="60ea2d5c51642d661fc0ac3419e0195000e0220cd10f70080639da96696f526f" exitCode=137 Jan 29 11:03:31 crc kubenswrapper[4852]: I0129 11:03:31.897649 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d","Type":"ContainerDied","Data":"60ea2d5c51642d661fc0ac3419e0195000e0220cd10f70080639da96696f526f"} Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.309380 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.500077 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-config-data\") pod \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.500247 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-combined-ca-bundle\") pod \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.500650 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkjm6\" (UniqueName: \"kubernetes.io/projected/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-kube-api-access-pkjm6\") pod \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\" (UID: \"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d\") " Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.506863 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-kube-api-access-pkjm6" (OuterVolumeSpecName: "kube-api-access-pkjm6") pod "e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" (UID: "e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d"). InnerVolumeSpecName "kube-api-access-pkjm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.526854 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-config-data" (OuterVolumeSpecName: "config-data") pod "e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" (UID: "e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.529073 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" (UID: "e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.603647 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.603687 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.603704 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkjm6\" (UniqueName: \"kubernetes.io/projected/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d-kube-api-access-pkjm6\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.915843 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d","Type":"ContainerDied","Data":"f222946a84b5e67fcaf5003e2d41ee333864c90aa5361fbe4f38d17d27e0d985"} Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.915935 4852 scope.go:117] "RemoveContainer" containerID="60ea2d5c51642d661fc0ac3419e0195000e0220cd10f70080639da96696f526f" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.916180 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.966993 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.977290 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.993929 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:03:32 crc kubenswrapper[4852]: E0129 11:03:32.994538 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.994564 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.994898 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.995787 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:32 crc kubenswrapper[4852]: I0129 11:03:32.998377 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:32.999966 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.000107 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.005105 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.011880 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.012019 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.012066 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.012093 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.012115 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zrx6\" (UniqueName: \"kubernetes.io/projected/932e2969-4638-44c5-94f3-bb07c5fd4a8f-kube-api-access-7zrx6\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.113941 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.114019 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.114247 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.114268 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zrx6\" (UniqueName: \"kubernetes.io/projected/932e2969-4638-44c5-94f3-bb07c5fd4a8f-kube-api-access-7zrx6\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.114777 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.118644 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.119032 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.119606 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.119931 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.135495 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zrx6\" (UniqueName: \"kubernetes.io/projected/932e2969-4638-44c5-94f3-bb07c5fd4a8f-kube-api-access-7zrx6\") pod \"nova-cell1-novncproxy-0\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.210596 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.317781 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.475718 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d" path="/var/lib/kubelet/pods/e1a4a6f5-ef86-4c7b-a33c-208ebcdb8d3d/volumes" Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.767334 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:03:33 crc kubenswrapper[4852]: I0129 11:03:33.925906 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"932e2969-4638-44c5-94f3-bb07c5fd4a8f","Type":"ContainerStarted","Data":"dafd3398dea98e303a224b79b36a63e5d6fc98e9f36dd66db464d941d2aa66d7"} Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.050758 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.051369 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.051512 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.054269 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.943692 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"932e2969-4638-44c5-94f3-bb07c5fd4a8f","Type":"ContainerStarted","Data":"b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b"} Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.943927 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.950025 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 11:03:34 crc kubenswrapper[4852]: I0129 11:03:34.990149 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.990124137 podStartE2EDuration="2.990124137s" podCreationTimestamp="2026-01-29 11:03:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:34.981308072 +0000 UTC m=+1312.198639226" watchObservedRunningTime="2026-01-29 11:03:34.990124137 +0000 UTC m=+1312.207455281" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.229503 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-z48fn"] Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.231189 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.247430 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-z48fn"] Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.379286 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.379558 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.379693 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf8nq\" (UniqueName: \"kubernetes.io/projected/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-kube-api-access-kf8nq\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.379723 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.379745 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.379818 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-config\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.481806 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf8nq\" (UniqueName: \"kubernetes.io/projected/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-kube-api-access-kf8nq\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.481898 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.481927 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.481993 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-config\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.482024 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.482043 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.483533 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.483544 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-config\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.483879 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.484042 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.484463 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.500616 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf8nq\" (UniqueName: 
\"kubernetes.io/projected/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-kube-api-access-kf8nq\") pod \"dnsmasq-dns-5c7b6c5df9-z48fn\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:35 crc kubenswrapper[4852]: I0129 11:03:35.609203 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:36 crc kubenswrapper[4852]: I0129 11:03:36.088106 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-z48fn"] Jan 29 11:03:36 crc kubenswrapper[4852]: I0129 11:03:36.961866 4852 generic.go:334] "Generic (PLEG): container finished" podID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerID="8113249d8097096be11542ee81dbefa8adcc9451437768487a64bc9c658b46a8" exitCode=0 Jan 29 11:03:36 crc kubenswrapper[4852]: I0129 11:03:36.961909 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" event={"ID":"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9","Type":"ContainerDied","Data":"8113249d8097096be11542ee81dbefa8adcc9451437768487a64bc9c658b46a8"} Jan 29 11:03:36 crc kubenswrapper[4852]: I0129 11:03:36.962234 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" event={"ID":"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9","Type":"ContainerStarted","Data":"ec03b195d85f021c20a6e6bf903bc579aae9cdcf0cf2d2282e66b86785fc6a2d"} Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.003845 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.395688 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.689893 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974461 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" event={"ID":"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9","Type":"ContainerStarted","Data":"907e93ef39a4a6500cbf037ce8f0712f50de707858f25b39621cba63db775ba4"} Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974664 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-central-agent" containerID="cri-o://882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c" gracePeriod=30 Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974734 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="sg-core" containerID="cri-o://a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0" gracePeriod=30 Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974783 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-notification-agent" containerID="cri-o://a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143" gracePeriod=30 Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974755 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="proxy-httpd" 
containerID="cri-o://d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b" gracePeriod=30 Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974967 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-log" containerID="cri-o://8d1ac7cfdca310e6fe9133057bb96db75284747694dd112000660faaa660faa5" gracePeriod=30 Jan 29 11:03:37 crc kubenswrapper[4852]: I0129 11:03:37.974994 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-api" containerID="cri-o://367985753e381bc0be936084bb20a7a3755257f94acf3f6ddc8317601dccac3f" gracePeriod=30 Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.006246 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" podStartSLOduration=3.006228818 podStartE2EDuration="3.006228818s" podCreationTimestamp="2026-01-29 11:03:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:38.00018091 +0000 UTC m=+1315.217512054" watchObservedRunningTime="2026-01-29 11:03:38.006228818 +0000 UTC m=+1315.223559952" Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.319124 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.986596 4852 generic.go:334] "Generic (PLEG): container finished" podID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerID="d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b" exitCode=0 Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.986922 4852 generic.go:334] "Generic (PLEG): container finished" podID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerID="a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0" exitCode=2 Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.986618 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerDied","Data":"d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b"} Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.986975 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerDied","Data":"a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0"} Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.986997 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerDied","Data":"882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c"} Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.986934 4852 generic.go:334] "Generic (PLEG): container finished" podID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerID="882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c" exitCode=0 Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.990600 4852 generic.go:334] "Generic (PLEG): container finished" podID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerID="8d1ac7cfdca310e6fe9133057bb96db75284747694dd112000660faaa660faa5" exitCode=143 Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.990705 4852 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/nova-api-0" event={"ID":"7665c83e-9be1-4a56-a2e1-2c59331b652f","Type":"ContainerDied","Data":"8d1ac7cfdca310e6fe9133057bb96db75284747694dd112000660faaa660faa5"} Jan 29 11:03:38 crc kubenswrapper[4852]: I0129 11:03:38.990835 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:40 crc kubenswrapper[4852]: I0129 11:03:40.857579 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:03:40 crc kubenswrapper[4852]: I0129 11:03:40.857847 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" containerName="kube-state-metrics" containerID="cri-o://0f3f431c82690a3e3065bdf7cc43eedfed3fad3c3a5ef230080a9b29931b406a" gracePeriod=30 Jan 29 11:03:41 crc kubenswrapper[4852]: I0129 11:03:41.008338 4852 generic.go:334] "Generic (PLEG): container finished" podID="1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" containerID="0f3f431c82690a3e3065bdf7cc43eedfed3fad3c3a5ef230080a9b29931b406a" exitCode=2 Jan 29 11:03:41 crc kubenswrapper[4852]: I0129 11:03:41.008415 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7","Type":"ContainerDied","Data":"0f3f431c82690a3e3065bdf7cc43eedfed3fad3c3a5ef230080a9b29931b406a"} Jan 29 11:03:41 crc kubenswrapper[4852]: I0129 11:03:41.553406 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 11:03:41 crc kubenswrapper[4852]: I0129 11:03:41.647853 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7bk8\" (UniqueName: \"kubernetes.io/projected/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7-kube-api-access-s7bk8\") pod \"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7\" (UID: \"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7\") " Jan 29 11:03:41 crc kubenswrapper[4852]: I0129 11:03:41.657734 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7-kube-api-access-s7bk8" (OuterVolumeSpecName: "kube-api-access-s7bk8") pod "1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" (UID: "1ce39f2f-9d24-4785-84a0-606b6b1ed2d7"). InnerVolumeSpecName "kube-api-access-s7bk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:41 crc kubenswrapper[4852]: I0129 11:03:41.750713 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7bk8\" (UniqueName: \"kubernetes.io/projected/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7-kube-api-access-s7bk8\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.017666 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1ce39f2f-9d24-4785-84a0-606b6b1ed2d7","Type":"ContainerDied","Data":"087c38120c7e4d21ee12c1fa6072526ed743bfc39a48eb861326e91e3a236576"} Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.017706 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.017717 4852 scope.go:117] "RemoveContainer" containerID="0f3f431c82690a3e3065bdf7cc43eedfed3fad3c3a5ef230080a9b29931b406a" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.021406 4852 generic.go:334] "Generic (PLEG): container finished" podID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerID="367985753e381bc0be936084bb20a7a3755257f94acf3f6ddc8317601dccac3f" exitCode=0 Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.021439 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7665c83e-9be1-4a56-a2e1-2c59331b652f","Type":"ContainerDied","Data":"367985753e381bc0be936084bb20a7a3755257f94acf3f6ddc8317601dccac3f"} Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.063522 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.069165 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.092482 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:03:42 crc kubenswrapper[4852]: E0129 11:03:42.093029 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" containerName="kube-state-metrics" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.093051 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" containerName="kube-state-metrics" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.093246 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" containerName="kube-state-metrics" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.093939 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.095960 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.097918 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.102395 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.168060 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.168128 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.168177 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.168213 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-784tl\" (UniqueName: \"kubernetes.io/projected/f6b14960-4f7f-465e-8e53-96a14875878e-kube-api-access-784tl\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.270257 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.270321 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-784tl\" (UniqueName: \"kubernetes.io/projected/f6b14960-4f7f-465e-8e53-96a14875878e-kube-api-access-784tl\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.270438 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.270485 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" 
(UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.276747 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.276887 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.277480 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.303386 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-784tl\" (UniqueName: \"kubernetes.io/projected/f6b14960-4f7f-465e-8e53-96a14875878e-kube-api-access-784tl\") pod \"kube-state-metrics-0\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.417195 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.513140 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.577179 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-combined-ca-bundle\") pod \"7665c83e-9be1-4a56-a2e1-2c59331b652f\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.577316 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-config-data\") pod \"7665c83e-9be1-4a56-a2e1-2c59331b652f\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.577405 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdnhf\" (UniqueName: \"kubernetes.io/projected/7665c83e-9be1-4a56-a2e1-2c59331b652f-kube-api-access-fdnhf\") pod \"7665c83e-9be1-4a56-a2e1-2c59331b652f\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.577489 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7665c83e-9be1-4a56-a2e1-2c59331b652f-logs\") pod \"7665c83e-9be1-4a56-a2e1-2c59331b652f\" (UID: \"7665c83e-9be1-4a56-a2e1-2c59331b652f\") " Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.580137 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7665c83e-9be1-4a56-a2e1-2c59331b652f-logs" (OuterVolumeSpecName: "logs") pod "7665c83e-9be1-4a56-a2e1-2c59331b652f" (UID: "7665c83e-9be1-4a56-a2e1-2c59331b652f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.588695 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7665c83e-9be1-4a56-a2e1-2c59331b652f-kube-api-access-fdnhf" (OuterVolumeSpecName: "kube-api-access-fdnhf") pod "7665c83e-9be1-4a56-a2e1-2c59331b652f" (UID: "7665c83e-9be1-4a56-a2e1-2c59331b652f"). InnerVolumeSpecName "kube-api-access-fdnhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.614185 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-config-data" (OuterVolumeSpecName: "config-data") pod "7665c83e-9be1-4a56-a2e1-2c59331b652f" (UID: "7665c83e-9be1-4a56-a2e1-2c59331b652f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.620888 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7665c83e-9be1-4a56-a2e1-2c59331b652f" (UID: "7665c83e-9be1-4a56-a2e1-2c59331b652f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.680314 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.680350 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7665c83e-9be1-4a56-a2e1-2c59331b652f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.680360 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdnhf\" (UniqueName: \"kubernetes.io/projected/7665c83e-9be1-4a56-a2e1-2c59331b652f-kube-api-access-fdnhf\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.680371 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7665c83e-9be1-4a56-a2e1-2c59331b652f-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.895362 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:03:42 crc kubenswrapper[4852]: W0129 11:03:42.898029 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6b14960_4f7f_465e_8e53_96a14875878e.slice/crio-539f66142b2fd4c380d37975aff9441915243d44b03a29c02ff72640cbfd93f9 WatchSource:0}: Error finding container 539f66142b2fd4c380d37975aff9441915243d44b03a29c02ff72640cbfd93f9: Status 404 returned error can't find the container with id 539f66142b2fd4c380d37975aff9441915243d44b03a29c02ff72640cbfd93f9 Jan 29 11:03:42 crc kubenswrapper[4852]: I0129 11:03:42.902391 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.032290 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7665c83e-9be1-4a56-a2e1-2c59331b652f","Type":"ContainerDied","Data":"14e4a52e11f93d528cbbb25f4b5bab005f0940b9dbad46ca22a566abc5f030fd"} Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.032311 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.032354 4852 scope.go:117] "RemoveContainer" containerID="367985753e381bc0be936084bb20a7a3755257f94acf3f6ddc8317601dccac3f" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.036545 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f6b14960-4f7f-465e-8e53-96a14875878e","Type":"ContainerStarted","Data":"539f66142b2fd4c380d37975aff9441915243d44b03a29c02ff72640cbfd93f9"} Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.061379 4852 scope.go:117] "RemoveContainer" containerID="8d1ac7cfdca310e6fe9133057bb96db75284747694dd112000660faaa660faa5" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.071807 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.097454 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.116262 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:43 crc kubenswrapper[4852]: E0129 11:03:43.116865 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-api" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.116883 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-api" Jan 29 11:03:43 crc kubenswrapper[4852]: E0129 11:03:43.116900 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-log" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.116906 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-log" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.117134 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-log" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.117168 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" containerName="nova-api-api" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.118197 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.120725 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.120767 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.121043 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.134946 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.194006 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.194092 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0d4e816-9d2e-42de-a67c-6711b756e7e1-logs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.194247 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.194330 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.194402 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r57p\" (UniqueName: \"kubernetes.io/projected/f0d4e816-9d2e-42de-a67c-6711b756e7e1-kube-api-access-7r57p\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.194439 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-config-data\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.295685 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.295725 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0d4e816-9d2e-42de-a67c-6711b756e7e1-logs\") pod \"nova-api-0\" (UID: 
\"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.295819 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.295878 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.295950 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r57p\" (UniqueName: \"kubernetes.io/projected/f0d4e816-9d2e-42de-a67c-6711b756e7e1-kube-api-access-7r57p\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.295987 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-config-data\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.296380 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0d4e816-9d2e-42de-a67c-6711b756e7e1-logs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.302308 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.302485 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.302689 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-config-data\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.304707 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.318981 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.319147 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r57p\" (UniqueName: 
\"kubernetes.io/projected/f0d4e816-9d2e-42de-a67c-6711b756e7e1-kube-api-access-7r57p\") pod \"nova-api-0\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.346793 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.466839 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.488712 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ce39f2f-9d24-4785-84a0-606b6b1ed2d7" path="/var/lib/kubelet/pods/1ce39f2f-9d24-4785-84a0-606b6b1ed2d7/volumes" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.489342 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7665c83e-9be1-4a56-a2e1-2c59331b652f" path="/var/lib/kubelet/pods/7665c83e-9be1-4a56-a2e1-2c59331b652f/volumes" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.613273 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705449 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-log-httpd\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705722 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-combined-ca-bundle\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705752 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-config-data\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705790 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-scripts\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705876 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-run-httpd\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705944 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xljn\" (UniqueName: \"kubernetes.io/projected/3affc64f-bcd8-4008-b2cf-23295a80e0cb-kube-api-access-6xljn\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.705975 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-sg-core-conf-yaml\") pod \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\" (UID: \"3affc64f-bcd8-4008-b2cf-23295a80e0cb\") " Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.706011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.706448 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.707147 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.711737 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-scripts" (OuterVolumeSpecName: "scripts") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.711949 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3affc64f-bcd8-4008-b2cf-23295a80e0cb-kube-api-access-6xljn" (OuterVolumeSpecName: "kube-api-access-6xljn") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "kube-api-access-6xljn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.744401 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.808974 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.809012 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3affc64f-bcd8-4008-b2cf-23295a80e0cb-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.809026 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xljn\" (UniqueName: \"kubernetes.io/projected/3affc64f-bcd8-4008-b2cf-23295a80e0cb-kube-api-access-6xljn\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.809037 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.816820 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.838548 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-config-data" (OuterVolumeSpecName: "config-data") pod "3affc64f-bcd8-4008-b2cf-23295a80e0cb" (UID: "3affc64f-bcd8-4008-b2cf-23295a80e0cb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.911365 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.911395 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3affc64f-bcd8-4008-b2cf-23295a80e0cb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:43 crc kubenswrapper[4852]: W0129 11:03:43.985437 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0d4e816_9d2e_42de_a67c_6711b756e7e1.slice/crio-d341567357a403f7ee3635626cf596b47810e31dfb949431689a8984acab1918 WatchSource:0}: Error finding container d341567357a403f7ee3635626cf596b47810e31dfb949431689a8984acab1918: Status 404 returned error can't find the container with id d341567357a403f7ee3635626cf596b47810e31dfb949431689a8984acab1918 Jan 29 11:03:43 crc kubenswrapper[4852]: I0129 11:03:43.986053 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.050638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f0d4e816-9d2e-42de-a67c-6711b756e7e1","Type":"ContainerStarted","Data":"d341567357a403f7ee3635626cf596b47810e31dfb949431689a8984acab1918"} Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.053308 4852 generic.go:334] "Generic (PLEG): container finished" podID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerID="a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143" exitCode=0 Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.053366 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerDied","Data":"a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143"} Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.053378 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.053392 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3affc64f-bcd8-4008-b2cf-23295a80e0cb","Type":"ContainerDied","Data":"adcf96c63d812892e3e7bd74dd758104f3e6453497e0e5d1e154ef7b704985b1"} Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.053410 4852 scope.go:117] "RemoveContainer" containerID="d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.061429 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f6b14960-4f7f-465e-8e53-96a14875878e","Type":"ContainerStarted","Data":"40738ebf6fcb34e9c873aac76a52d0310a88a7a7608cb768d663221a2e552a28"} Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.070078 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.071344 4852 scope.go:117] "RemoveContainer" containerID="a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.090664 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.121027 4852 scope.go:117] "RemoveContainer" containerID="a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.163281 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.651837541 podStartE2EDuration="2.161662689s" podCreationTimestamp="2026-01-29 11:03:42 +0000 UTC" firstStartedPulling="2026-01-29 11:03:42.902182167 +0000 UTC m=+1320.119513301" lastFinishedPulling="2026-01-29 11:03:43.412007315 +0000 UTC m=+1320.629338449" observedRunningTime="2026-01-29 11:03:44.095082373 +0000 UTC m=+1321.312413527" watchObservedRunningTime="2026-01-29 11:03:44.161662689 +0000 UTC m=+1321.378993813" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.185962 4852 scope.go:117] "RemoveContainer" containerID="882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.203821 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.211284 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.218971 4852 scope.go:117] "RemoveContainer" containerID="d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.219455 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b\": container with ID starting with d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b not found: ID does not exist" containerID="d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.219540 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b"} err="failed to get container status 
\"d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b\": rpc error: code = NotFound desc = could not find container \"d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b\": container with ID starting with d8d6ae12cfca3c9c247e0db1fea63c1c845a9c4eb7969fdb79adfebc0345e09b not found: ID does not exist" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.219605 4852 scope.go:117] "RemoveContainer" containerID="a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.219858 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0\": container with ID starting with a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0 not found: ID does not exist" containerID="a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.219900 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0"} err="failed to get container status \"a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0\": rpc error: code = NotFound desc = could not find container \"a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0\": container with ID starting with a66edb2f9f44558601060a7baaf63e0dec7aab97e116e3ce72da902353cb0ae0 not found: ID does not exist" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.219927 4852 scope.go:117] "RemoveContainer" containerID="a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.220172 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143\": container with ID starting with a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143 not found: ID does not exist" containerID="a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.220211 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143"} err="failed to get container status \"a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143\": rpc error: code = NotFound desc = could not find container \"a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143\": container with ID starting with a5b646f092d5d88244da57f8346b3e8a8a46ed11629f8eaabff39a7611047143 not found: ID does not exist" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.220237 4852 scope.go:117] "RemoveContainer" containerID="882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.220382 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c\": container with ID starting with 882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c not found: ID does not exist" containerID="882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.220401 4852 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c"} err="failed to get container status \"882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c\": rpc error: code = NotFound desc = could not find container \"882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c\": container with ID starting with 882deafdf99eed5daa7ecbf6b1a87b46d529288fbac12ed6464e289150d38d4c not found: ID does not exist" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.222848 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.223317 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-central-agent" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223334 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-central-agent" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.223372 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="proxy-httpd" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223379 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="proxy-httpd" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.223398 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="sg-core" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223405 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="sg-core" Jan 29 11:03:44 crc kubenswrapper[4852]: E0129 11:03:44.223421 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-notification-agent" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223429 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-notification-agent" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223769 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="sg-core" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223792 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="proxy-httpd" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223810 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-central-agent" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.223826 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" containerName="ceilometer-notification-agent" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.226022 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.228207 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.228433 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.228496 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.236328 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.328711 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.328803 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-scripts\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.328861 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.328888 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-log-httpd\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.328940 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-run-httpd\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.329063 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltpts\" (UniqueName: \"kubernetes.io/projected/09a768ce-3178-4886-b1fc-8f6aa136a04f-kube-api-access-ltpts\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.329161 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-config-data\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.329223 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.369272 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-f76js"] Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.370959 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.373811 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.374284 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.386452 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-f76js"] Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549207 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltpts\" (UniqueName: \"kubernetes.io/projected/09a768ce-3178-4886-b1fc-8f6aa136a04f-kube-api-access-ltpts\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549324 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-config-data\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549386 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549446 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549495 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-scripts\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549530 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.549565 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-log-httpd\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc 
kubenswrapper[4852]: I0129 11:03:44.549622 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-run-httpd\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.560121 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-config-data\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.563141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-scripts\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.571703 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-log-httpd\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.572688 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-run-httpd\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.575198 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.577615 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.585848 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.586564 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltpts\" (UniqueName: \"kubernetes.io/projected/09a768ce-3178-4886-b1fc-8f6aa136a04f-kube-api-access-ltpts\") pod \"ceilometer-0\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.652046 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zdlz\" (UniqueName: \"kubernetes.io/projected/a25d15a2-cada-4765-bd31-40f3e42a0edb-kube-api-access-6zdlz\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc 
kubenswrapper[4852]: I0129 11:03:44.652119 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-config-data\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.652155 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.652187 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-scripts\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.757619 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zdlz\" (UniqueName: \"kubernetes.io/projected/a25d15a2-cada-4765-bd31-40f3e42a0edb-kube-api-access-6zdlz\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.757690 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-config-data\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.757724 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.757761 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-scripts\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.771658 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.774151 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-scripts\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 
11:03:44.786958 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zdlz\" (UniqueName: \"kubernetes.io/projected/a25d15a2-cada-4765-bd31-40f3e42a0edb-kube-api-access-6zdlz\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.802646 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-config-data\") pod \"nova-cell1-cell-mapping-f76js\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.849949 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:03:44 crc kubenswrapper[4852]: I0129 11:03:44.989840 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.096933 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f0d4e816-9d2e-42de-a67c-6711b756e7e1","Type":"ContainerStarted","Data":"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5"} Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.324165 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.480249 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3affc64f-bcd8-4008-b2cf-23295a80e0cb" path="/var/lib/kubelet/pods/3affc64f-bcd8-4008-b2cf-23295a80e0cb/volumes" Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.489337 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-f76js"] Jan 29 11:03:45 crc kubenswrapper[4852]: W0129 11:03:45.494743 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda25d15a2_cada_4765_bd31_40f3e42a0edb.slice/crio-244d3e5bf61f51a68e716cf2b5723eca3b9130b1d97c34f6e584a5349a7979d6 WatchSource:0}: Error finding container 244d3e5bf61f51a68e716cf2b5723eca3b9130b1d97c34f6e584a5349a7979d6: Status 404 returned error can't find the container with id 244d3e5bf61f51a68e716cf2b5723eca3b9130b1d97c34f6e584a5349a7979d6 Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.610750 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.667252 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-blg6c"] Jan 29 11:03:45 crc kubenswrapper[4852]: I0129 11:03:45.667477 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="dnsmasq-dns" containerID="cri-o://66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85" gracePeriod=10 Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.093832 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.109137 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerStarted","Data":"ee38b63940b8c7e58191b4d6b17892bf42579cd381126b237554e32af0499e46"} Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.115237 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f0d4e816-9d2e-42de-a67c-6711b756e7e1","Type":"ContainerStarted","Data":"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e"} Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.124377 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f76js" event={"ID":"a25d15a2-cada-4765-bd31-40f3e42a0edb","Type":"ContainerStarted","Data":"a2ff805ec21cb9448761c97cfa8e29d4dcd8359611c43f72b2b39865d075b412"} Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.124420 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f76js" event={"ID":"a25d15a2-cada-4765-bd31-40f3e42a0edb","Type":"ContainerStarted","Data":"244d3e5bf61f51a68e716cf2b5723eca3b9130b1d97c34f6e584a5349a7979d6"} Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.130054 4852 generic.go:334] "Generic (PLEG): container finished" podID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerID="66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85" exitCode=0 Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.130120 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" event={"ID":"3a1d369c-e67c-4fa5-897f-38a385e6841b","Type":"ContainerDied","Data":"66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85"} Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.130149 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" event={"ID":"3a1d369c-e67c-4fa5-897f-38a385e6841b","Type":"ContainerDied","Data":"471c51ea4bca1f7aa9cbbdefbd79d8c7efce62b906ff81f8edcc5b08ad3e1613"} Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.130175 4852 scope.go:117] "RemoveContainer" containerID="66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.130202 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.191881 4852 scope.go:117] "RemoveContainer" containerID="f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.192761 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-svc\") pod \"3a1d369c-e67c-4fa5-897f-38a385e6841b\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.192818 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-nb\") pod \"3a1d369c-e67c-4fa5-897f-38a385e6841b\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.192867 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wbdm\" (UniqueName: \"kubernetes.io/projected/3a1d369c-e67c-4fa5-897f-38a385e6841b-kube-api-access-5wbdm\") pod \"3a1d369c-e67c-4fa5-897f-38a385e6841b\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.192886 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-sb\") pod \"3a1d369c-e67c-4fa5-897f-38a385e6841b\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.193070 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-config\") pod \"3a1d369c-e67c-4fa5-897f-38a385e6841b\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.193121 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-swift-storage-0\") pod \"3a1d369c-e67c-4fa5-897f-38a385e6841b\" (UID: \"3a1d369c-e67c-4fa5-897f-38a385e6841b\") " Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.194979 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.194962134 podStartE2EDuration="3.194962134s" podCreationTimestamp="2026-01-29 11:03:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:46.142053272 +0000 UTC m=+1323.359384426" watchObservedRunningTime="2026-01-29 11:03:46.194962134 +0000 UTC m=+1323.412293268" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.203064 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-f76js" podStartSLOduration=2.203045601 podStartE2EDuration="2.203045601s" podCreationTimestamp="2026-01-29 11:03:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:46.169646556 +0000 UTC m=+1323.386977680" watchObservedRunningTime="2026-01-29 11:03:46.203045601 +0000 UTC m=+1323.420376735" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.206499 
4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a1d369c-e67c-4fa5-897f-38a385e6841b-kube-api-access-5wbdm" (OuterVolumeSpecName: "kube-api-access-5wbdm") pod "3a1d369c-e67c-4fa5-897f-38a385e6841b" (UID: "3a1d369c-e67c-4fa5-897f-38a385e6841b"). InnerVolumeSpecName "kube-api-access-5wbdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.217862 4852 scope.go:117] "RemoveContainer" containerID="66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85" Jan 29 11:03:46 crc kubenswrapper[4852]: E0129 11:03:46.218316 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85\": container with ID starting with 66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85 not found: ID does not exist" containerID="66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.218357 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85"} err="failed to get container status \"66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85\": rpc error: code = NotFound desc = could not find container \"66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85\": container with ID starting with 66b49d5cfde8e07d2462a02723e61170a4fa6a64372d84647dd8daaf9533be85 not found: ID does not exist" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.218385 4852 scope.go:117] "RemoveContainer" containerID="f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7" Jan 29 11:03:46 crc kubenswrapper[4852]: E0129 11:03:46.218711 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7\": container with ID starting with f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7 not found: ID does not exist" containerID="f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.218757 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7"} err="failed to get container status \"f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7\": rpc error: code = NotFound desc = could not find container \"f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7\": container with ID starting with f729158526be43b43f766ac969dcd7c6101fc15f71db9a5d2552f8eb42df23f7 not found: ID does not exist" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.244244 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3a1d369c-e67c-4fa5-897f-38a385e6841b" (UID: "3a1d369c-e67c-4fa5-897f-38a385e6841b"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.252224 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3a1d369c-e67c-4fa5-897f-38a385e6841b" (UID: "3a1d369c-e67c-4fa5-897f-38a385e6841b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.254447 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3a1d369c-e67c-4fa5-897f-38a385e6841b" (UID: "3a1d369c-e67c-4fa5-897f-38a385e6841b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.270213 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-config" (OuterVolumeSpecName: "config") pod "3a1d369c-e67c-4fa5-897f-38a385e6841b" (UID: "3a1d369c-e67c-4fa5-897f-38a385e6841b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.272324 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3a1d369c-e67c-4fa5-897f-38a385e6841b" (UID: "3a1d369c-e67c-4fa5-897f-38a385e6841b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.303788 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.303826 4852 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.303836 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.303844 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.303854 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wbdm\" (UniqueName: \"kubernetes.io/projected/3a1d369c-e67c-4fa5-897f-38a385e6841b-kube-api-access-5wbdm\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.303863 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3a1d369c-e67c-4fa5-897f-38a385e6841b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.469859 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-865f5d856f-blg6c"] Jan 29 11:03:46 crc kubenswrapper[4852]: I0129 11:03:46.480026 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-blg6c"] Jan 29 11:03:47 crc kubenswrapper[4852]: I0129 11:03:47.143425 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerStarted","Data":"b41dd88e530d0078eac9379893c00ca26e6907c39d4da306037e9f16ecf118b4"} Jan 29 11:03:47 crc kubenswrapper[4852]: I0129 11:03:47.143661 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerStarted","Data":"d816637dddc4a3042b5748abee104ea2ba5b25b7a148418ca6e26b6a15ced4dc"} Jan 29 11:03:47 crc kubenswrapper[4852]: I0129 11:03:47.474234 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" path="/var/lib/kubelet/pods/3a1d369c-e67c-4fa5-897f-38a385e6841b/volumes" Jan 29 11:03:48 crc kubenswrapper[4852]: I0129 11:03:48.156717 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerStarted","Data":"fce074b5c8c23ae7e6f2429e922beae69e6f5e352d49342fb4993c72c8e442cd"} Jan 29 11:03:50 crc kubenswrapper[4852]: I0129 11:03:50.177183 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerStarted","Data":"3fba4011047b82105d23692b2e3c2f2018cbd57dcbd733255f9715aaff21b5b9"} Jan 29 11:03:50 crc kubenswrapper[4852]: I0129 11:03:50.177603 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 11:03:50 crc kubenswrapper[4852]: I0129 11:03:50.220320 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.292680329 podStartE2EDuration="6.220296116s" podCreationTimestamp="2026-01-29 11:03:44 +0000 UTC" firstStartedPulling="2026-01-29 11:03:45.354365759 +0000 UTC m=+1322.571696893" lastFinishedPulling="2026-01-29 11:03:49.281981546 +0000 UTC m=+1326.499312680" observedRunningTime="2026-01-29 11:03:50.199650212 +0000 UTC m=+1327.416981376" watchObservedRunningTime="2026-01-29 11:03:50.220296116 +0000 UTC m=+1327.437627260" Jan 29 11:03:51 crc kubenswrapper[4852]: I0129 11:03:51.019991 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-865f5d856f-blg6c" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.192:5353: i/o timeout" Jan 29 11:03:51 crc kubenswrapper[4852]: I0129 11:03:51.186435 4852 generic.go:334] "Generic (PLEG): container finished" podID="a25d15a2-cada-4765-bd31-40f3e42a0edb" containerID="a2ff805ec21cb9448761c97cfa8e29d4dcd8359611c43f72b2b39865d075b412" exitCode=0 Jan 29 11:03:51 crc kubenswrapper[4852]: I0129 11:03:51.186540 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f76js" event={"ID":"a25d15a2-cada-4765-bd31-40f3e42a0edb","Type":"ContainerDied","Data":"a2ff805ec21cb9448761c97cfa8e29d4dcd8359611c43f72b2b39865d075b412"} Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.444759 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.586810 4852 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.629710 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-combined-ca-bundle\") pod \"a25d15a2-cada-4765-bd31-40f3e42a0edb\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.629837 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zdlz\" (UniqueName: \"kubernetes.io/projected/a25d15a2-cada-4765-bd31-40f3e42a0edb-kube-api-access-6zdlz\") pod \"a25d15a2-cada-4765-bd31-40f3e42a0edb\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.630013 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-scripts\") pod \"a25d15a2-cada-4765-bd31-40f3e42a0edb\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.630099 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-config-data\") pod \"a25d15a2-cada-4765-bd31-40f3e42a0edb\" (UID: \"a25d15a2-cada-4765-bd31-40f3e42a0edb\") " Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.650462 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-scripts" (OuterVolumeSpecName: "scripts") pod "a25d15a2-cada-4765-bd31-40f3e42a0edb" (UID: "a25d15a2-cada-4765-bd31-40f3e42a0edb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.666792 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a25d15a2-cada-4765-bd31-40f3e42a0edb-kube-api-access-6zdlz" (OuterVolumeSpecName: "kube-api-access-6zdlz") pod "a25d15a2-cada-4765-bd31-40f3e42a0edb" (UID: "a25d15a2-cada-4765-bd31-40f3e42a0edb"). InnerVolumeSpecName "kube-api-access-6zdlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.669390 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-config-data" (OuterVolumeSpecName: "config-data") pod "a25d15a2-cada-4765-bd31-40f3e42a0edb" (UID: "a25d15a2-cada-4765-bd31-40f3e42a0edb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.695157 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a25d15a2-cada-4765-bd31-40f3e42a0edb" (UID: "a25d15a2-cada-4765-bd31-40f3e42a0edb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.733159 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.733203 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.733218 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zdlz\" (UniqueName: \"kubernetes.io/projected/a25d15a2-cada-4765-bd31-40f3e42a0edb-kube-api-access-6zdlz\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:52 crc kubenswrapper[4852]: I0129 11:03:52.733230 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a25d15a2-cada-4765-bd31-40f3e42a0edb-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.210134 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f76js" event={"ID":"a25d15a2-cada-4765-bd31-40f3e42a0edb","Type":"ContainerDied","Data":"244d3e5bf61f51a68e716cf2b5723eca3b9130b1d97c34f6e584a5349a7979d6"} Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.210176 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="244d3e5bf61f51a68e716cf2b5723eca3b9130b1d97c34f6e584a5349a7979d6" Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.210242 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f76js" Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.389656 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.390095 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-log" containerID="cri-o://bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5" gracePeriod=30 Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.390788 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-api" containerID="cri-o://a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e" gracePeriod=30 Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.430458 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.431407 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="2738152f-742e-46f9-b7b7-3db29a88cf62" containerName="nova-scheduler-scheduler" containerID="cri-o://c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730" gracePeriod=30 Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.459598 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.461309 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" 
containerName="nova-metadata-log" containerID="cri-o://ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713" gracePeriod=30 Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.461716 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-metadata" containerID="cri-o://4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4" gracePeriod=30 Jan 29 11:03:53 crc kubenswrapper[4852]: I0129 11:03:53.962849 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.061876 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-internal-tls-certs\") pod \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.061945 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-public-tls-certs\") pod \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.062001 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-combined-ca-bundle\") pod \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.062059 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7r57p\" (UniqueName: \"kubernetes.io/projected/f0d4e816-9d2e-42de-a67c-6711b756e7e1-kube-api-access-7r57p\") pod \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.062164 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0d4e816-9d2e-42de-a67c-6711b756e7e1-logs\") pod \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.062227 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-config-data\") pod \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\" (UID: \"f0d4e816-9d2e-42de-a67c-6711b756e7e1\") " Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.062640 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0d4e816-9d2e-42de-a67c-6711b756e7e1-logs" (OuterVolumeSpecName: "logs") pod "f0d4e816-9d2e-42de-a67c-6711b756e7e1" (UID: "f0d4e816-9d2e-42de-a67c-6711b756e7e1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.066796 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0d4e816-9d2e-42de-a67c-6711b756e7e1-kube-api-access-7r57p" (OuterVolumeSpecName: "kube-api-access-7r57p") pod "f0d4e816-9d2e-42de-a67c-6711b756e7e1" (UID: "f0d4e816-9d2e-42de-a67c-6711b756e7e1"). InnerVolumeSpecName "kube-api-access-7r57p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.105884 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0d4e816-9d2e-42de-a67c-6711b756e7e1" (UID: "f0d4e816-9d2e-42de-a67c-6711b756e7e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.106114 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.109148 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-config-data" (OuterVolumeSpecName: "config-data") pod "f0d4e816-9d2e-42de-a67c-6711b756e7e1" (UID: "f0d4e816-9d2e-42de-a67c-6711b756e7e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.109968 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.118175 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.118298 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="2738152f-742e-46f9-b7b7-3db29a88cf62" containerName="nova-scheduler-scheduler" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.128741 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f0d4e816-9d2e-42de-a67c-6711b756e7e1" (UID: "f0d4e816-9d2e-42de-a67c-6711b756e7e1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.136709 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f0d4e816-9d2e-42de-a67c-6711b756e7e1" (UID: "f0d4e816-9d2e-42de-a67c-6711b756e7e1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.163946 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.163979 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7r57p\" (UniqueName: \"kubernetes.io/projected/f0d4e816-9d2e-42de-a67c-6711b756e7e1-kube-api-access-7r57p\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.163989 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0d4e816-9d2e-42de-a67c-6711b756e7e1-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.163998 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.164007 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.164015 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0d4e816-9d2e-42de-a67c-6711b756e7e1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.223605 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae11b17a-4e83-4f95-8390-5b0d1159fd6d","Type":"ContainerDied","Data":"ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713"} Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.223568 4852 generic.go:334] "Generic (PLEG): container finished" podID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerID="ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713" exitCode=143 Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226325 4852 generic.go:334] "Generic (PLEG): container finished" podID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerID="a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e" exitCode=0 Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226346 4852 generic.go:334] "Generic (PLEG): container finished" podID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerID="bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5" exitCode=143 Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226360 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f0d4e816-9d2e-42de-a67c-6711b756e7e1","Type":"ContainerDied","Data":"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e"} Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226376 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f0d4e816-9d2e-42de-a67c-6711b756e7e1","Type":"ContainerDied","Data":"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5"} Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226386 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f0d4e816-9d2e-42de-a67c-6711b756e7e1","Type":"ContainerDied","Data":"d341567357a403f7ee3635626cf596b47810e31dfb949431689a8984acab1918"} Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226402 4852 scope.go:117] "RemoveContainer" containerID="a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.226518 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.255828 4852 scope.go:117] "RemoveContainer" containerID="bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.279085 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.287548 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.291116 4852 scope.go:117] "RemoveContainer" containerID="a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.291547 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e\": container with ID starting with a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e not found: ID does not exist" containerID="a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.291604 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e"} err="failed to get container status \"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e\": rpc error: code = NotFound desc = could not find container \"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e\": container with ID starting with a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e not found: ID does not exist" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.291632 4852 scope.go:117] "RemoveContainer" containerID="bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.292036 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5\": container with ID starting with bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5 not found: ID does not exist" containerID="bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.292089 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5"} err="failed to get container status \"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5\": rpc error: 
code = NotFound desc = could not find container \"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5\": container with ID starting with bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5 not found: ID does not exist" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.292122 4852 scope.go:117] "RemoveContainer" containerID="a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.292405 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e"} err="failed to get container status \"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e\": rpc error: code = NotFound desc = could not find container \"a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e\": container with ID starting with a7d58b8af14442e152c253218c9ce4e35604e89760f7a53f48b78461e1b6644e not found: ID does not exist" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.292439 4852 scope.go:117] "RemoveContainer" containerID="bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.292707 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5"} err="failed to get container status \"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5\": rpc error: code = NotFound desc = could not find container \"bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5\": container with ID starting with bde6e43386d77407c28a69c6e133cb94cbae3939d3801fc41a435a3b940e3ca5 not found: ID does not exist" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.296644 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.297198 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="init" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297222 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="init" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.297248 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-log" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297259 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-log" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.297280 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25d15a2-cada-4765-bd31-40f3e42a0edb" containerName="nova-manage" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297287 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25d15a2-cada-4765-bd31-40f3e42a0edb" containerName="nova-manage" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.297306 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-api" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297312 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-api" Jan 29 11:03:54 crc kubenswrapper[4852]: E0129 11:03:54.297326 4852 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="dnsmasq-dns" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297333 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="dnsmasq-dns" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297542 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-api" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297567 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a25d15a2-cada-4765-bd31-40f3e42a0edb" containerName="nova-manage" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297598 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" containerName="nova-api-log" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.297608 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a1d369c-e67c-4fa5-897f-38a385e6841b" containerName="dnsmasq-dns" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.298813 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.301203 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.301649 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.304004 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.307371 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.369289 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-config-data\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.369330 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2px9m\" (UniqueName: \"kubernetes.io/projected/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-kube-api-access-2px9m\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.369368 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-public-tls-certs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.369496 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.369534 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.369617 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-logs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.470638 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.470682 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.470742 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-logs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.470772 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-config-data\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.470791 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2px9m\" (UniqueName: \"kubernetes.io/projected/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-kube-api-access-2px9m\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.470815 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-public-tls-certs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.471563 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-logs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.475055 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.475091 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-config-data\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.476196 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-public-tls-certs\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.477105 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.489748 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2px9m\" (UniqueName: \"kubernetes.io/projected/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-kube-api-access-2px9m\") pod \"nova-api-0\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " pod="openstack/nova-api-0" Jan 29 11:03:54 crc kubenswrapper[4852]: I0129 11:03:54.620261 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:03:55 crc kubenswrapper[4852]: I0129 11:03:55.133561 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:03:55 crc kubenswrapper[4852]: W0129 11:03:55.137031 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda31f5d33_4598_4ecb_9b2c_fc8271e8d29e.slice/crio-732d58c88cef74e39f596b2616b324d23c644efa7ea0d2ed0f73157102ab85f6 WatchSource:0}: Error finding container 732d58c88cef74e39f596b2616b324d23c644efa7ea0d2ed0f73157102ab85f6: Status 404 returned error can't find the container with id 732d58c88cef74e39f596b2616b324d23c644efa7ea0d2ed0f73157102ab85f6 Jan 29 11:03:55 crc kubenswrapper[4852]: I0129 11:03:55.236274 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e","Type":"ContainerStarted","Data":"732d58c88cef74e39f596b2616b324d23c644efa7ea0d2ed0f73157102ab85f6"} Jan 29 11:03:55 crc kubenswrapper[4852]: I0129 11:03:55.476373 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0d4e816-9d2e-42de-a67c-6711b756e7e1" path="/var/lib/kubelet/pods/f0d4e816-9d2e-42de-a67c-6711b756e7e1/volumes" Jan 29 11:03:56 crc kubenswrapper[4852]: I0129 11:03:56.251833 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e","Type":"ContainerStarted","Data":"d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2"} Jan 29 11:03:56 crc kubenswrapper[4852]: I0129 11:03:56.252114 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e","Type":"ContainerStarted","Data":"38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5"} Jan 29 11:03:56 crc kubenswrapper[4852]: I0129 11:03:56.278292 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.278271706 podStartE2EDuration="2.278271706s" podCreationTimestamp="2026-01-29 11:03:54 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:03:56.276044813 +0000 UTC m=+1333.493375957" watchObservedRunningTime="2026-01-29 11:03:56.278271706 +0000 UTC m=+1333.495602840" Jan 29 11:03:56 crc kubenswrapper[4852]: I0129 11:03:56.589801 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": read tcp 10.217.0.2:60114->10.217.0.198:8775: read: connection reset by peer" Jan 29 11:03:56 crc kubenswrapper[4852]: I0129 11:03:56.589866 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": read tcp 10.217.0.2:60120->10.217.0.198:8775: read: connection reset by peer" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.070011 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.230782 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-logs\") pod \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.230835 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-config-data\") pod \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.230968 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rh9d5\" (UniqueName: \"kubernetes.io/projected/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-kube-api-access-rh9d5\") pod \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.231087 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-combined-ca-bundle\") pod \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.231113 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-nova-metadata-tls-certs\") pod \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\" (UID: \"ae11b17a-4e83-4f95-8390-5b0d1159fd6d\") " Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.233011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-logs" (OuterVolumeSpecName: "logs") pod "ae11b17a-4e83-4f95-8390-5b0d1159fd6d" (UID: "ae11b17a-4e83-4f95-8390-5b0d1159fd6d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.260864 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-kube-api-access-rh9d5" (OuterVolumeSpecName: "kube-api-access-rh9d5") pod "ae11b17a-4e83-4f95-8390-5b0d1159fd6d" (UID: "ae11b17a-4e83-4f95-8390-5b0d1159fd6d"). InnerVolumeSpecName "kube-api-access-rh9d5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.289030 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae11b17a-4e83-4f95-8390-5b0d1159fd6d" (UID: "ae11b17a-4e83-4f95-8390-5b0d1159fd6d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.289922 4852 generic.go:334] "Generic (PLEG): container finished" podID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerID="4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4" exitCode=0 Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.290869 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.291001 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae11b17a-4e83-4f95-8390-5b0d1159fd6d","Type":"ContainerDied","Data":"4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4"} Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.291030 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae11b17a-4e83-4f95-8390-5b0d1159fd6d","Type":"ContainerDied","Data":"3513c2a7edef22223380bac870f6126b6e9b4488db6706e526794b832eb2f584"} Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.291046 4852 scope.go:117] "RemoveContainer" containerID="4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.320818 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-config-data" (OuterVolumeSpecName: "config-data") pod "ae11b17a-4e83-4f95-8390-5b0d1159fd6d" (UID: "ae11b17a-4e83-4f95-8390-5b0d1159fd6d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.342913 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rh9d5\" (UniqueName: \"kubernetes.io/projected/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-kube-api-access-rh9d5\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.343095 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.343172 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.343262 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.367859 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ae11b17a-4e83-4f95-8390-5b0d1159fd6d" (UID: "ae11b17a-4e83-4f95-8390-5b0d1159fd6d"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.379088 4852 scope.go:117] "RemoveContainer" containerID="ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.400500 4852 scope.go:117] "RemoveContainer" containerID="4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4" Jan 29 11:03:57 crc kubenswrapper[4852]: E0129 11:03:57.400991 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4\": container with ID starting with 4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4 not found: ID does not exist" containerID="4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.401028 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4"} err="failed to get container status \"4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4\": rpc error: code = NotFound desc = could not find container \"4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4\": container with ID starting with 4637d9ec3913c2d58c90e2055ad5175f66c90de876634ea5201dd9ddc544c1a4 not found: ID does not exist" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.401054 4852 scope.go:117] "RemoveContainer" containerID="ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713" Jan 29 11:03:57 crc kubenswrapper[4852]: E0129 11:03:57.401391 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713\": container with ID starting with ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713 not found: ID does not 
exist" containerID="ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.401408 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713"} err="failed to get container status \"ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713\": rpc error: code = NotFound desc = could not find container \"ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713\": container with ID starting with ab4497584421e38e7a7b6e943e5aea8d509741318f58920f0bd19bad2d702713 not found: ID does not exist" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.445495 4852 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae11b17a-4e83-4f95-8390-5b0d1159fd6d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.614769 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.624092 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.653431 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:57 crc kubenswrapper[4852]: E0129 11:03:57.655836 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-metadata" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.655858 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-metadata" Jan 29 11:03:57 crc kubenswrapper[4852]: E0129 11:03:57.655874 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-log" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.655881 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-log" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.656086 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-metadata" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.656110 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" containerName="nova-metadata-log" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.657038 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.661828 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.665861 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.665895 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.750281 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-logs\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.750437 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.750464 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kh6r\" (UniqueName: \"kubernetes.io/projected/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-kube-api-access-9kh6r\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.750508 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-config-data\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.750530 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.852157 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.852206 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kh6r\" (UniqueName: \"kubernetes.io/projected/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-kube-api-access-9kh6r\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.852251 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-config-data\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " 
pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.852274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.852359 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-logs\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.852856 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-logs\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.857739 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.858337 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-config-data\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.862226 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:57 crc kubenswrapper[4852]: I0129 11:03:57.875912 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kh6r\" (UniqueName: \"kubernetes.io/projected/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-kube-api-access-9kh6r\") pod \"nova-metadata-0\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " pod="openstack/nova-metadata-0" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.039183 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.365145 4852 generic.go:334] "Generic (PLEG): container finished" podID="2738152f-742e-46f9-b7b7-3db29a88cf62" containerID="c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730" exitCode=0 Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.365329 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2738152f-742e-46f9-b7b7-3db29a88cf62","Type":"ContainerDied","Data":"c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730"} Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.412973 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.571340 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k96vz\" (UniqueName: \"kubernetes.io/projected/2738152f-742e-46f9-b7b7-3db29a88cf62-kube-api-access-k96vz\") pod \"2738152f-742e-46f9-b7b7-3db29a88cf62\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.571605 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-config-data\") pod \"2738152f-742e-46f9-b7b7-3db29a88cf62\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.571698 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-combined-ca-bundle\") pod \"2738152f-742e-46f9-b7b7-3db29a88cf62\" (UID: \"2738152f-742e-46f9-b7b7-3db29a88cf62\") " Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.576144 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2738152f-742e-46f9-b7b7-3db29a88cf62-kube-api-access-k96vz" (OuterVolumeSpecName: "kube-api-access-k96vz") pod "2738152f-742e-46f9-b7b7-3db29a88cf62" (UID: "2738152f-742e-46f9-b7b7-3db29a88cf62"). InnerVolumeSpecName "kube-api-access-k96vz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.600366 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-config-data" (OuterVolumeSpecName: "config-data") pod "2738152f-742e-46f9-b7b7-3db29a88cf62" (UID: "2738152f-742e-46f9-b7b7-3db29a88cf62"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.621653 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2738152f-742e-46f9-b7b7-3db29a88cf62" (UID: "2738152f-742e-46f9-b7b7-3db29a88cf62"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.622390 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.675855 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k96vz\" (UniqueName: \"kubernetes.io/projected/2738152f-742e-46f9-b7b7-3db29a88cf62-kube-api-access-k96vz\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.675894 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:58 crc kubenswrapper[4852]: I0129 11:03:58.675903 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2738152f-742e-46f9-b7b7-3db29a88cf62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:03:58 crc kubenswrapper[4852]: W0129 11:03:58.917253 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99bfcf5b_7ab7_4c29_93ed_e7661d26a7c2.slice/crio-ef0d72dbc47522942e34315d745935b38756123b1c5354e77002335cb554c5f8 WatchSource:0}: Error finding container ef0d72dbc47522942e34315d745935b38756123b1c5354e77002335cb554c5f8: Status 404 returned error can't find the container with id ef0d72dbc47522942e34315d745935b38756123b1c5354e77002335cb554c5f8 Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.378002 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2738152f-742e-46f9-b7b7-3db29a88cf62","Type":"ContainerDied","Data":"3bbbb669b115d02a9595879f93fcabbad2a69fb79743bfc3cf646bdf1806a47c"} Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.378030 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.378311 4852 scope.go:117] "RemoveContainer" containerID="c387858a5627c4e4d6eaca088bb1059e412f20abcd92eaf6d9001f31baecf730" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.379514 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2","Type":"ContainerStarted","Data":"ef0d72dbc47522942e34315d745935b38756123b1c5354e77002335cb554c5f8"} Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.416616 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.425874 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.434639 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:59 crc kubenswrapper[4852]: E0129 11:03:59.435035 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2738152f-742e-46f9-b7b7-3db29a88cf62" containerName="nova-scheduler-scheduler" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.435051 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2738152f-742e-46f9-b7b7-3db29a88cf62" containerName="nova-scheduler-scheduler" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.435213 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2738152f-742e-46f9-b7b7-3db29a88cf62" containerName="nova-scheduler-scheduler" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.435806 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.438524 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.447891 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.530133 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.530421 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-config-data\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.530473 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrwpr\" (UniqueName: \"kubernetes.io/projected/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-kube-api-access-qrwpr\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.559710 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2738152f-742e-46f9-b7b7-3db29a88cf62" path="/var/lib/kubelet/pods/2738152f-742e-46f9-b7b7-3db29a88cf62/volumes" Jan 29 11:03:59 crc 
kubenswrapper[4852]: I0129 11:03:59.561066 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae11b17a-4e83-4f95-8390-5b0d1159fd6d" path="/var/lib/kubelet/pods/ae11b17a-4e83-4f95-8390-5b0d1159fd6d/volumes" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.632638 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-config-data\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.632737 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrwpr\" (UniqueName: \"kubernetes.io/projected/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-kube-api-access-qrwpr\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.632875 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.637671 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.639034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-config-data\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.651301 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrwpr\" (UniqueName: \"kubernetes.io/projected/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-kube-api-access-qrwpr\") pod \"nova-scheduler-0\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " pod="openstack/nova-scheduler-0" Jan 29 11:03:59 crc kubenswrapper[4852]: I0129 11:03:59.753244 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.017438 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.017792 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.205139 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:04:00 crc kubenswrapper[4852]: W0129 11:04:00.206206 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod786f3a4d_fe5e_42a2_84f7_7e8b3bd038f8.slice/crio-cc67314fe2591c62afd8046c24d5db7daadbc58a89e4d100b6013ec098757fe8 WatchSource:0}: Error finding container cc67314fe2591c62afd8046c24d5db7daadbc58a89e4d100b6013ec098757fe8: Status 404 returned error can't find the container with id cc67314fe2591c62afd8046c24d5db7daadbc58a89e4d100b6013ec098757fe8 Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.396026 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2","Type":"ContainerStarted","Data":"540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035"} Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.396067 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2","Type":"ContainerStarted","Data":"d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb"} Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.404186 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8","Type":"ContainerStarted","Data":"cc67314fe2591c62afd8046c24d5db7daadbc58a89e4d100b6013ec098757fe8"} Jan 29 11:04:00 crc kubenswrapper[4852]: I0129 11:04:00.430515 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.430498077 podStartE2EDuration="3.430498077s" podCreationTimestamp="2026-01-29 11:03:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:04:00.428897938 +0000 UTC m=+1337.646229072" watchObservedRunningTime="2026-01-29 11:04:00.430498077 +0000 UTC m=+1337.647829211" Jan 29 11:04:01 crc kubenswrapper[4852]: I0129 11:04:01.417746 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8","Type":"ContainerStarted","Data":"ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853"} Jan 29 11:04:01 crc kubenswrapper[4852]: I0129 11:04:01.436317 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.436296574 podStartE2EDuration="2.436296574s" podCreationTimestamp="2026-01-29 11:03:59 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:04:01.434869719 +0000 UTC m=+1338.652200853" watchObservedRunningTime="2026-01-29 11:04:01.436296574 +0000 UTC m=+1338.653627708" Jan 29 11:04:03 crc kubenswrapper[4852]: I0129 11:04:03.040699 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 11:04:03 crc kubenswrapper[4852]: I0129 11:04:03.040964 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 11:04:04 crc kubenswrapper[4852]: I0129 11:04:04.620514 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 11:04:04 crc kubenswrapper[4852]: I0129 11:04:04.620963 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 11:04:04 crc kubenswrapper[4852]: I0129 11:04:04.754209 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 11:04:05 crc kubenswrapper[4852]: I0129 11:04:05.633828 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 11:04:05 crc kubenswrapper[4852]: I0129 11:04:05.633825 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 11:04:08 crc kubenswrapper[4852]: I0129 11:04:08.040666 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 11:04:08 crc kubenswrapper[4852]: I0129 11:04:08.041279 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 11:04:09 crc kubenswrapper[4852]: I0129 11:04:09.062243 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.209:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 11:04:09 crc kubenswrapper[4852]: I0129 11:04:09.062750 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.209:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 11:04:09 crc kubenswrapper[4852]: I0129 11:04:09.754352 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 11:04:09 crc kubenswrapper[4852]: I0129 11:04:09.783526 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 11:04:10 crc kubenswrapper[4852]: I0129 11:04:10.563495 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 11:04:14 crc kubenswrapper[4852]: I0129 11:04:14.626796 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/nova-api-0" Jan 29 11:04:14 crc kubenswrapper[4852]: I0129 11:04:14.627702 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 11:04:14 crc kubenswrapper[4852]: I0129 11:04:14.630598 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 11:04:14 crc kubenswrapper[4852]: I0129 11:04:14.634248 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 11:04:14 crc kubenswrapper[4852]: I0129 11:04:14.859619 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 11:04:15 crc kubenswrapper[4852]: I0129 11:04:15.625472 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 11:04:15 crc kubenswrapper[4852]: I0129 11:04:15.632277 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 11:04:18 crc kubenswrapper[4852]: I0129 11:04:18.047695 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 11:04:18 crc kubenswrapper[4852]: I0129 11:04:18.052869 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 11:04:18 crc kubenswrapper[4852]: I0129 11:04:18.055336 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 11:04:18 crc kubenswrapper[4852]: I0129 11:04:18.655182 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.017575 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.018232 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.018293 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.019228 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d7c3f62d61ba87d851460d7d26afa6e186fa6847967e9524e9452f3e890a1087"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.019299 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://d7c3f62d61ba87d851460d7d26afa6e186fa6847967e9524e9452f3e890a1087" gracePeriod=600 Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.767519 4852 
generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="d7c3f62d61ba87d851460d7d26afa6e186fa6847967e9524e9452f3e890a1087" exitCode=0 Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.767671 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"d7c3f62d61ba87d851460d7d26afa6e186fa6847967e9524e9452f3e890a1087"} Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.767957 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f"} Jan 29 11:04:30 crc kubenswrapper[4852]: I0129 11:04:30.768028 4852 scope.go:117] "RemoveContainer" containerID="d25938c544c94cb7ff57505e6e76ac88750fccb2f6818b7dc821d1e097f62ced" Jan 29 11:04:35 crc kubenswrapper[4852]: I0129 11:04:35.941079 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 11:04:35 crc kubenswrapper[4852]: I0129 11:04:35.941665 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="20516ac5-463a-4d2c-a442-d74254876ddf" containerName="openstackclient" containerID="cri-o://9e2fbd4b9daaa437f8f321cc9702964e5924353cd1b5d954ec556bcd5e7b8cfd" gracePeriod=2 Jan 29 11:04:35 crc kubenswrapper[4852]: I0129 11:04:35.976093 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.253235 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.266405 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-5040-account-create-update-jmf7z"] Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.272936 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20516ac5-463a-4d2c-a442-d74254876ddf" containerName="openstackclient" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.272969 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="20516ac5-463a-4d2c-a442-d74254876ddf" containerName="openstackclient" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.273183 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="20516ac5-463a-4d2c-a442-d74254876ddf" containerName="openstackclient" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.273768 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.283164 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.288171 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-cmfc4"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.310776 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-jmf7z"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.320264 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-cmfc4"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.329937 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.330173 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="ovn-northd" containerID="cri-o://9b8c01c6c407ae7627a56b9f7843d60ca4bb8c2d21417edb2f07b4193f385d24" gracePeriod=30 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.330610 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="openstack-network-exporter" containerID="cri-o://a923b5cbfe6f467279e0395781d92a15fa09516398b7b3fef3ba567acaf19c1b" gracePeriod=30 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.344300 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5xps\" (UniqueName: \"kubernetes.io/projected/5185a393-4eff-496b-bd08-a8a91ada2a17-kube-api-access-j5xps\") pod \"nova-cell1-5040-account-create-update-jmf7z\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.344341 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts\") pod \"nova-cell1-5040-account-create-update-jmf7z\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.351864 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.351933 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data podName:f84fb26d-e835-4d75-95d5-695b6e033bb7 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:36.851913583 +0000 UTC m=+1374.069244707 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7") : configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.460760 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5xps\" (UniqueName: \"kubernetes.io/projected/5185a393-4eff-496b-bd08-a8a91ada2a17-kube-api-access-j5xps\") pod \"nova-cell1-5040-account-create-update-jmf7z\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.460813 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts\") pod \"nova-cell1-5040-account-create-update-jmf7z\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.470271 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts\") pod \"nova-cell1-5040-account-create-update-jmf7z\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.483882 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-cjmvw"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.485197 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.489835 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.514894 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cjmvw"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.527847 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5xps\" (UniqueName: \"kubernetes.io/projected/5185a393-4eff-496b-bd08-a8a91ada2a17-kube-api-access-j5xps\") pod \"nova-cell1-5040-account-create-update-jmf7z\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.550652 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-cd05-account-create-update-27smf"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.562104 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts\") pod \"root-account-create-update-cjmvw\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.562201 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npdhd\" (UniqueName: \"kubernetes.io/projected/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-kube-api-access-npdhd\") pod \"root-account-create-update-cjmvw\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.581397 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.597499 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-cd05-account-create-update-27smf"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.598860 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.613120 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-9kkxs"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.619802 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-9kkxs"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.629218 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.630109 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="openstack-network-exporter" containerID="cri-o://71d22b58d4dc22fc8a888b0e9739ec5f63964d6176341a6a37e66f1cb7ee656c" gracePeriod=300 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.650364 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4bcf-account-create-update-q7t5b"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.652011 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.654632 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.665116 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts\") pod \"root-account-create-update-cjmvw\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.665506 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npdhd\" (UniqueName: \"kubernetes.io/projected/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-kube-api-access-npdhd\") pod \"root-account-create-update-cjmvw\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.665904 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.665971 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data podName:1ab8189f-e95a-47b5-a130-5404901974e2 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:37.165943649 +0000 UTC m=+1374.383274783 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data") pod "rabbitmq-server-0" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2") : configmap "rabbitmq-config-data" not found Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.667027 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts\") pod \"root-account-create-update-cjmvw\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.675252 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-cfee-account-create-update-8f8sh"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.682397 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.686893 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.699950 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npdhd\" (UniqueName: \"kubernetes.io/projected/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-kube-api-access-npdhd\") pod \"root-account-create-update-cjmvw\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.723832 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f82e-account-create-update-86cb7"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.750074 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.765307 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f82e-account-create-update-86cb7"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.770743 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="ovsdbserver-sb" containerID="cri-o://5a4569a0c66938bb15ae418dc4474095bccb853bb517e38eb2c06c8c521e60aa" gracePeriod=300 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.786455 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4bcf-account-create-update-q7t5b"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.803697 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-cfee-account-create-update-8f8sh"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.864935 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.868996 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="openstack-network-exporter" containerID="cri-o://9caeeb9cc777b568b828a8350ae3044f5968f1db84c2eef74d143f409773f59a" gracePeriod=300 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.870256 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ff8124a-5795-45b2-ae2c-18f779e7da1e-operator-scripts\") pod \"neutron-4bcf-account-create-update-q7t5b\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.870375 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zndc8\" (UniqueName: \"kubernetes.io/projected/77915ad3-b974-4266-ac52-4f81873d744c-kube-api-access-zndc8\") pod \"glance-cfee-account-create-update-8f8sh\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.870427 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stw64\" (UniqueName: \"kubernetes.io/projected/6ff8124a-5795-45b2-ae2c-18f779e7da1e-kube-api-access-stw64\") pod \"neutron-4bcf-account-create-update-q7t5b\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.870471 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77915ad3-b974-4266-ac52-4f81873d744c-operator-scripts\") pod \"glance-cfee-account-create-update-8f8sh\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.870674 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:36 crc kubenswrapper[4852]: E0129 11:04:36.870723 4852 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data podName:f84fb26d-e835-4d75-95d5-695b6e033bb7 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:37.870705657 +0000 UTC m=+1375.088036781 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7") : configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.895228 4852 generic.go:334] "Generic (PLEG): container finished" podID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerID="71d22b58d4dc22fc8a888b0e9739ec5f63964d6176341a6a37e66f1cb7ee656c" exitCode=2 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.895336 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b2c66fcd-07fb-42ea-8176-77a4627b3886","Type":"ContainerDied","Data":"71d22b58d4dc22fc8a888b0e9739ec5f63964d6176341a6a37e66f1cb7ee656c"} Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.917198 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_b99e639e-b687-4552-bfa0-ed4391283aaf/ovn-northd/0.log" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.917256 4852 generic.go:334] "Generic (PLEG): container finished" podID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerID="a923b5cbfe6f467279e0395781d92a15fa09516398b7b3fef3ba567acaf19c1b" exitCode=2 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.917278 4852 generic.go:334] "Generic (PLEG): container finished" podID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerID="9b8c01c6c407ae7627a56b9f7843d60ca4bb8c2d21417edb2f07b4193f385d24" exitCode=143 Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.917305 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b99e639e-b687-4552-bfa0-ed4391283aaf","Type":"ContainerDied","Data":"a923b5cbfe6f467279e0395781d92a15fa09516398b7b3fef3ba567acaf19c1b"} Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.917335 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b99e639e-b687-4552-bfa0-ed4391283aaf","Type":"ContainerDied","Data":"9b8c01c6c407ae7627a56b9f7843d60ca4bb8c2d21417edb2f07b4193f385d24"} Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.971902 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zndc8\" (UniqueName: \"kubernetes.io/projected/77915ad3-b974-4266-ac52-4f81873d744c-kube-api-access-zndc8\") pod \"glance-cfee-account-create-update-8f8sh\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.972000 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stw64\" (UniqueName: \"kubernetes.io/projected/6ff8124a-5795-45b2-ae2c-18f779e7da1e-kube-api-access-stw64\") pod \"neutron-4bcf-account-create-update-q7t5b\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.972073 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77915ad3-b974-4266-ac52-4f81873d744c-operator-scripts\") pod 
\"glance-cfee-account-create-update-8f8sh\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.972125 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ff8124a-5795-45b2-ae2c-18f779e7da1e-operator-scripts\") pod \"neutron-4bcf-account-create-update-q7t5b\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.972989 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ff8124a-5795-45b2-ae2c-18f779e7da1e-operator-scripts\") pod \"neutron-4bcf-account-create-update-q7t5b\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:36 crc kubenswrapper[4852]: I0129 11:04:36.974421 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77915ad3-b974-4266-ac52-4f81873d744c-operator-scripts\") pod \"glance-cfee-account-create-update-8f8sh\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.008368 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zndc8\" (UniqueName: \"kubernetes.io/projected/77915ad3-b974-4266-ac52-4f81873d744c-kube-api-access-zndc8\") pod \"glance-cfee-account-create-update-8f8sh\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.008380 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stw64\" (UniqueName: \"kubernetes.io/projected/6ff8124a-5795-45b2-ae2c-18f779e7da1e-kube-api-access-stw64\") pod \"neutron-4bcf-account-create-update-q7t5b\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.044143 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-h2ljt"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.077692 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-h2ljt"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.079164 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="ovsdbserver-nb" containerID="cri-o://2efbb95f790b03b0a620fc69f9cb3727ff67bc70ca3bd32306962a43832948f8" gracePeriod=300 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.086724 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.117029 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.118869 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4bcf-account-create-update-g4lz6"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.140690 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4bcf-account-create-update-g4lz6"] Jan 29 11:04:37 crc kubenswrapper[4852]: E0129 11:04:37.180035 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 11:04:37 crc kubenswrapper[4852]: E0129 11:04:37.180123 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data podName:1ab8189f-e95a-47b5-a130-5404901974e2 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:38.18008681 +0000 UTC m=+1375.397417944 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data") pod "rabbitmq-server-0" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2") : configmap "rabbitmq-config-data" not found Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.193542 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vn9vq"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.214693 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vn9vq"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.223105 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-cfee-account-create-update-5hl62"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.255913 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-pxxmr"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.275234 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-cfee-account-create-update-5hl62"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.286840 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-pxxmr"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.308652 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-nwllr"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.331572 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-nwllr"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.540120 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08ba6d45-1a5b-4131-baae-76160239df48" path="/var/lib/kubelet/pods/08ba6d45-1a5b-4131-baae-76160239df48/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.553095 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ce36949-28e5-43d1-ae87-7cf9113cc884" path="/var/lib/kubelet/pods/2ce36949-28e5-43d1-ae87-7cf9113cc884/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.569492 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f96a744-12c9-4dd2-908d-d1984b7fccfa" path="/var/lib/kubelet/pods/4f96a744-12c9-4dd2-908d-d1984b7fccfa/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.570238 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5640101e-33ca-4d7f-a5db-2ddd5f04cb03" 
path="/var/lib/kubelet/pods/5640101e-33ca-4d7f-a5db-2ddd5f04cb03/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.570939 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="815af2ae-6f87-4b01-9712-43fb3c70f9a7" path="/var/lib/kubelet/pods/815af2ae-6f87-4b01-9712-43fb3c70f9a7/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.574008 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86ae0309-cead-43f3-bf5b-617d972977c5" path="/var/lib/kubelet/pods/86ae0309-cead-43f3-bf5b-617d972977c5/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.575274 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88726a03-9dc5-49b5-b4cc-60b521b51d61" path="/var/lib/kubelet/pods/88726a03-9dc5-49b5-b4cc-60b521b51d61/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.576051 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a595383-5c9f-4b27-9612-4a1408221623" path="/var/lib/kubelet/pods/8a595383-5c9f-4b27-9612-4a1408221623/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.577161 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5ef08b5-420f-46cd-bc10-f021836fd6ee" path="/var/lib/kubelet/pods/d5ef08b5-420f-46cd-bc10-f021836fd6ee/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.608086 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7cb1fa8-606f-44bb-9b88-85bfbf76ba22" path="/var/lib/kubelet/pods/e7cb1fa8-606f-44bb-9b88-85bfbf76ba22/volumes" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.609403 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-f76js"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.611294 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-f76js"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.613057 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.622638 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="probe" containerID="cri-o://115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f" gracePeriod=30 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.622733 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="cinder-scheduler" containerID="cri-o://fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11" gracePeriod=30 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.634701 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-67sl6"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.692307 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-rxcgs"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.692553 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-rxcgs" podUID="1d4860e9-e690-409f-bc12-86a1c51e6db1" containerName="openstack-network-exporter" containerID="cri-o://655c5a497b7193f8e8b2150d018d78c5d4b8e8338aa5d573bffd5c89cd5db084" gracePeriod=30 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.765207 4852 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/ovn-controller-ovs-k8pcs"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.806249 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_b99e639e-b687-4552-bfa0-ed4391283aaf/ovn-northd/0.log" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.806330 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.848374 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-z48fn"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.848673 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerName="dnsmasq-dns" containerID="cri-o://907e93ef39a4a6500cbf037ce8f0712f50de707858f25b39621cba63db775ba4" gracePeriod=10 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.879609 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-lrj96"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.889818 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-lrj96"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947220 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-metrics-certs-tls-certs\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947324 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-combined-ca-bundle\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947350 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-config\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947386 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzrdq\" (UniqueName: \"kubernetes.io/projected/b99e639e-b687-4552-bfa0-ed4391283aaf-kube-api-access-xzrdq\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947554 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-rundir\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947589 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-scripts\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.947625 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-northd-tls-certs\") pod \"b99e639e-b687-4552-bfa0-ed4391283aaf\" (UID: \"b99e639e-b687-4552-bfa0-ed4391283aaf\") " Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.949361 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-n2g46"] Jan 29 11:04:37 crc kubenswrapper[4852]: E0129 11:04:37.949399 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:37 crc kubenswrapper[4852]: E0129 11:04:37.949447 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data podName:f84fb26d-e835-4d75-95d5-695b6e033bb7 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:39.949432222 +0000 UTC m=+1377.166763356 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7") : configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.950239 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.950776 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-scripts" (OuterVolumeSpecName: "scripts") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.950804 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-config" (OuterVolumeSpecName: "config") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.961257 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b99e639e-b687-4552-bfa0-ed4391283aaf-kube-api-access-xzrdq" (OuterVolumeSpecName: "kube-api-access-xzrdq") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "kube-api-access-xzrdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.968721 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-n2g46"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.970424 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_b99e639e-b687-4552-bfa0-ed4391283aaf/ovn-northd/0.log" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.970555 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.971194 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b99e639e-b687-4552-bfa0-ed4391283aaf","Type":"ContainerDied","Data":"4e70ab5c672e7441c2c7feba35acabc2b55fa9d89e93a178b684ce859c3f6cf3"} Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.971226 4852 scope.go:117] "RemoveContainer" containerID="a923b5cbfe6f467279e0395781d92a15fa09516398b7b3fef3ba567acaf19c1b" Jan 29 11:04:37 crc kubenswrapper[4852]: E0129 11:04:37.979138 4852 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 11:04:37 crc kubenswrapper[4852]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 29 11:04:37 crc kubenswrapper[4852]: Jan 29 11:04:37 crc kubenswrapper[4852]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 11:04:37 crc kubenswrapper[4852]: Jan 29 11:04:37 crc kubenswrapper[4852]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 11:04:37 crc kubenswrapper[4852]: Jan 29 11:04:37 crc kubenswrapper[4852]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 11:04:37 crc kubenswrapper[4852]: Jan 29 11:04:37 crc kubenswrapper[4852]: if [ -n "nova_cell1" ]; then Jan 29 11:04:37 crc kubenswrapper[4852]: GRANT_DATABASE="nova_cell1" Jan 29 11:04:37 crc kubenswrapper[4852]: else Jan 29 11:04:37 crc kubenswrapper[4852]: GRANT_DATABASE="*" Jan 29 11:04:37 crc kubenswrapper[4852]: fi Jan 29 11:04:37 crc kubenswrapper[4852]: Jan 29 11:04:37 crc kubenswrapper[4852]: # going for maximum compatibility here: Jan 29 11:04:37 crc kubenswrapper[4852]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 11:04:37 crc kubenswrapper[4852]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 11:04:37 crc kubenswrapper[4852]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 11:04:37 crc kubenswrapper[4852]: # support updates Jan 29 11:04:37 crc kubenswrapper[4852]: Jan 29 11:04:37 crc kubenswrapper[4852]: $MYSQL_CMD < logger="UnhandledError" Jan 29 11:04:37 crc kubenswrapper[4852]: E0129 11:04:37.983487 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell1-db-secret\\\" not found\"" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" podUID="5185a393-4eff-496b-bd08-a8a91ada2a17" Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.986401 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.996859 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api-log" containerID="cri-o://5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351" gracePeriod=30 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.997394 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api" containerID="cri-o://f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983" gracePeriod=30 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.997591 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5c4b86c744-rrhm8"] Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.997803 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5c4b86c744-rrhm8" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-log" containerID="cri-o://6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9" gracePeriod=30 Jan 29 11:04:37 crc kubenswrapper[4852]: I0129 11:04:37.997885 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5c4b86c744-rrhm8" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-api" containerID="cri-o://c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.000557 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_1be50193-e246-46b3-bc61-974f6b01b6e7/ovsdbserver-nb/0.log" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.000699 4852 generic.go:334] "Generic (PLEG): container finished" podID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerID="9caeeb9cc777b568b828a8350ae3044f5968f1db84c2eef74d143f409773f59a" exitCode=2 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.000717 4852 generic.go:334] "Generic (PLEG): container finished" podID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerID="2efbb95f790b03b0a620fc69f9cb3727ff67bc70ca3bd32306962a43832948f8" exitCode=143 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.000812 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1be50193-e246-46b3-bc61-974f6b01b6e7","Type":"ContainerDied","Data":"9caeeb9cc777b568b828a8350ae3044f5968f1db84c2eef74d143f409773f59a"} Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.000837 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"1be50193-e246-46b3-bc61-974f6b01b6e7","Type":"ContainerDied","Data":"2efbb95f790b03b0a620fc69f9cb3727ff67bc70ca3bd32306962a43832948f8"} Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.018896 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-kbmbt"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.019682 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rxcgs_1d4860e9-e690-409f-bc12-86a1c51e6db1/openstack-network-exporter/0.log" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.019740 4852 generic.go:334] "Generic (PLEG): container finished" podID="1d4860e9-e690-409f-bc12-86a1c51e6db1" containerID="655c5a497b7193f8e8b2150d018d78c5d4b8e8338aa5d573bffd5c89cd5db084" exitCode=2 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.019793 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rxcgs" event={"ID":"1d4860e9-e690-409f-bc12-86a1c51e6db1","Type":"ContainerDied","Data":"655c5a497b7193f8e8b2150d018d78c5d4b8e8338aa5d573bffd5c89cd5db084"} Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.030126 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-kbmbt"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.040871 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b2c66fcd-07fb-42ea-8176-77a4627b3886/ovsdbserver-sb/0.log" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.040945 4852 generic.go:334] "Generic (PLEG): container finished" podID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerID="5a4569a0c66938bb15ae418dc4474095bccb853bb517e38eb2c06c8c521e60aa" exitCode=143 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.040984 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b2c66fcd-07fb-42ea-8176-77a4627b3886","Type":"ContainerDied","Data":"5a4569a0c66938bb15ae418dc4474095bccb853bb517e38eb2c06c8c521e60aa"} Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.041012 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b2c66fcd-07fb-42ea-8176-77a4627b3886","Type":"ContainerDied","Data":"10ea9ba2fe7a820363d41fc44e51f154ca9bbac1ccc1cd29f6b42f66bf4ce78e"} Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.041022 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10ea9ba2fe7a820363d41fc44e51f154ca9bbac1ccc1cd29f6b42f66bf4ce78e" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.047097 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-jmf7z"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.050830 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.050867 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.050880 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99e639e-b687-4552-bfa0-ed4391283aaf-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: 
I0129 11:04:38.050891 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzrdq\" (UniqueName: \"kubernetes.io/projected/b99e639e-b687-4552-bfa0-ed4391283aaf-kube-api-access-xzrdq\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.061818 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.069291 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.069768 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-server" containerID="cri-o://761316244eb0a26173100890d918cc7c1799abc67c0ec48e5c29ebc05dc7ed29" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070135 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="swift-recon-cron" containerID="cri-o://73859270b6703e319bb738155b60b5da8025987a2cbf2f4800261c79942db2e5" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070177 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="rsync" containerID="cri-o://ce1f7c6d351cc99e3313d53f3d8f5133e907d6c87aab097b279a18222b571462" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070214 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-expirer" containerID="cri-o://2fb8dded90eb3884703a4aa309816c0eff0cdf02427346e9d34e49253bf4d662" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070444 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-updater" containerID="cri-o://907bf322df4410dc6b7ff975343a603805ddab5bdea051b8b9a9717eb895ca80" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070484 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-auditor" containerID="cri-o://45528cdeb850649069e891209d9ca38b5ce8b5d0110cd02108c6b5f6abe281fc" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070562 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-replicator" containerID="cri-o://1e2b102b24f601e91c5e39ae16f39b46498693458c4fead72ba2b77aa8d49771" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070645 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-server" 
containerID="cri-o://dde3e5fe58352e0cc69a3b45408e08dda15923a3ecec816e83910494c6735af4" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070690 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-updater" containerID="cri-o://1da31bf3d6d70a3d5937cf86e2d07be3913f158a9179ef5de6c23c100b7e5517" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070722 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-auditor" containerID="cri-o://e7228e8da4a774db3b5290c30cdafed9405b8034f11e829f2c3ac803d946e4c3" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070725 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-reaper" containerID="cri-o://ac4d599adbc91bf89af64ad4f64d8683dca701a7383cd3db396529b7ca9ceeec" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070750 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-replicator" containerID="cri-o://7d958b8db8a90d2d6554c92f9750a01a932e5397d4686c338494a75d5e717c07" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070799 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-auditor" containerID="cri-o://91fe6b60918e96d60b8de169eade7e1727fc0ccc381141ea808831c289639ab8" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070853 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-replicator" containerID="cri-o://7d276e426302c43e751c21a713a36f854cb3218920539f9fed9da38f45d520ce" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.070904 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-server" containerID="cri-o://118a6805e58988df9a38c6169f10a35d7f949be36a831d95306796e4b1348a45" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.115810 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.125971 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b2c66fcd-07fb-42ea-8176-77a4627b3886/ovsdbserver-sb/0.log" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.126041 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.152953 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.153004 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.167364 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5699-account-create-update-wncww"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.192362 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5699-account-create-update-wncww"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.198131 4852 scope.go:117] "RemoveContainer" containerID="9b8c01c6c407ae7627a56b9f7843d60ca4bb8c2d21417edb2f07b4193f385d24" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.215036 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-7sq8p"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.220828 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "b99e639e-b687-4552-bfa0-ed4391283aaf" (UID: "b99e639e-b687-4552-bfa0-ed4391283aaf"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.242048 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-7sq8p"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.252517 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.254053 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdb-rundir\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255154 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdbserver-sb-tls-certs\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255274 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-scripts\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255396 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255481 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-combined-ca-bundle\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255737 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-metrics-certs-tls-certs\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255883 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smldg\" (UniqueName: \"kubernetes.io/projected/b2c66fcd-07fb-42ea-8176-77a4627b3886-kube-api-access-smldg\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.255964 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-config\") pod \"b2c66fcd-07fb-42ea-8176-77a4627b3886\" (UID: \"b2c66fcd-07fb-42ea-8176-77a4627b3886\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.257554 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b99e639e-b687-4552-bfa0-ed4391283aaf-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: E0129 11:04:38.257709 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 11:04:38 crc kubenswrapper[4852]: E0129 11:04:38.257802 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data podName:1ab8189f-e95a-47b5-a130-5404901974e2 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:40.257787249 +0000 UTC m=+1377.475118383 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data") pod "rabbitmq-server-0" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2") : configmap "rabbitmq-config-data" not found Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.261295 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.265789 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-config" (OuterVolumeSpecName: "config") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.268528 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-scripts" (OuterVolumeSpecName: "scripts") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.277560 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2c66fcd-07fb-42ea-8176-77a4627b3886-kube-api-access-smldg" (OuterVolumeSpecName: "kube-api-access-smldg") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "kube-api-access-smldg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.284424 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_1be50193-e246-46b3-bc61-974f6b01b6e7/ovsdbserver-nb/0.log" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.284488 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.306649 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.308347 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.358949 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.358978 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.359003 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.359013 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smldg\" (UniqueName: \"kubernetes.io/projected/b2c66fcd-07fb-42ea-8176-77a4627b3886-kube-api-access-smldg\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.359026 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2c66fcd-07fb-42ea-8176-77a4627b3886-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.374162 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.375085 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-log" containerID="cri-o://38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.376870 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerName="rabbitmq" containerID="cri-o://c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77" gracePeriod=604800 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.377227 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-api" containerID="cri-o://d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.418280 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.418869 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerName="nova-scheduler-scheduler" containerID="cri-o://ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.446048 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-rnnzv"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.460442 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-combined-ca-bundle\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: 
\"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.462815 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdbserver-nb-tls-certs\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.462940 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-metrics-certs-tls-certs\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.462979 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-scripts\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.463007 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdb-rundir\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.463138 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.463184 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c44r\" (UniqueName: \"kubernetes.io/projected/1be50193-e246-46b3-bc61-974f6b01b6e7-kube-api-access-2c44r\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.463237 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-config\") pod \"1be50193-e246-46b3-bc61-974f6b01b6e7\" (UID: \"1be50193-e246-46b3-bc61-974f6b01b6e7\") " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.464364 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-config" (OuterVolumeSpecName: "config") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.474290 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "ovsdb-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.474668 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-scripts" (OuterVolumeSpecName: "scripts") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.478079 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-rnnzv"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.480456 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-jmf7z"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.483167 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.508876 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-rhfbl"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.523188 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-rhfbl"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.534645 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-hjt4q"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.534680 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.534876 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-log" containerID="cri-o://d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.535220 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-metadata" containerID="cri-o://540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.536661 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-hjt4q"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.544270 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c0a7-account-create-update-rbbpm"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.558823 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c0a7-account-create-update-rbbpm"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.565241 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.565268 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/1be50193-e246-46b3-bc61-974f6b01b6e7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.565277 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.565286 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.607315 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-j4njn"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.619630 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1be50193-e246-46b3-bc61-974f6b01b6e7-kube-api-access-2c44r" (OuterVolumeSpecName: "kube-api-access-2c44r") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "kube-api-access-2c44r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.619742 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.632832 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.660644 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-jmbw4"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.668144 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.668177 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c44r\" (UniqueName: \"kubernetes.io/projected/1be50193-e246-46b3-bc61-974f6b01b6e7-kube-api-access-2c44r\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.673453 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.683441 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-jmbw4"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.696411 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-j4njn"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.715193 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.721416 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-sdvb7"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.725437 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/placement-a154-account-create-update-l267s"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.744862 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-sdvb7"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.752211 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-a154-account-create-update-l267s"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.761674 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.761966 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-log" containerID="cri-o://ce3f6c95a078dc3bea52fa656715a9c015827b3d5a2ef4d87997ac240eeab0e7" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.762409 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-httpd" containerID="cri-o://d56126df6c76ee4d2b57d1a9bfa70a3c707884469080624229983943f11c8570" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.771715 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cjmvw"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.771788 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.780908 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-cfee-account-create-update-8f8sh"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.786632 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.787014 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-log" containerID="cri-o://d75357c4635ac4b27b487dfd891bbe8e4a30c70a0f91635d4c51f8b3a4c92c2b" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.787655 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-httpd" containerID="cri-o://496427d75669b53712641a841d59135547182e1cbb1fb27f1360eb43642abdda" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.804760 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f4fbff985-ww2n4"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.804977 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f4fbff985-ww2n4" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-api" containerID="cri-o://d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.805341 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f4fbff985-ww2n4" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-httpd" 
containerID="cri-o://82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.827677 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4bcf-account-create-update-q7t5b"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.849865 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-jqwsq"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.878088 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7c548544bc-nwvzz"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.878330 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7c548544bc-nwvzz" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-httpd" containerID="cri-o://43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.878765 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7c548544bc-nwvzz" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-server" containerID="cri-o://a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.883908 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" containerID="cri-o://856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" gracePeriod=29 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.905931 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" containerName="galera" containerID="cri-o://10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: E0129 11:04:38.919761 4852 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Jan 29 11:04:38 crc kubenswrapper[4852]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Jan 29 11:04:38 crc kubenswrapper[4852]: + source /usr/local/bin/container-scripts/functions Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNBridge=br-int Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNRemote=tcp:localhost:6642 Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNEncapType=geneve Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNAvailabilityZones= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ EnableChassisAsGateway=true Jan 29 11:04:38 crc kubenswrapper[4852]: ++ PhysicalNetworks= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNHostName= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ DB_FILE=/etc/openvswitch/conf.db Jan 29 11:04:38 crc kubenswrapper[4852]: ++ ovs_dir=/var/lib/openvswitch Jan 29 11:04:38 crc kubenswrapper[4852]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Jan 29 11:04:38 crc kubenswrapper[4852]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Jan 29 11:04:38 crc kubenswrapper[4852]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 29 11:04:38 crc kubenswrapper[4852]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 29 11:04:38 crc kubenswrapper[4852]: + sleep 0.5 Jan 29 11:04:38 crc kubenswrapper[4852]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 29 11:04:38 crc kubenswrapper[4852]: + cleanup_ovsdb_server_semaphore Jan 29 11:04:38 crc kubenswrapper[4852]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 29 11:04:38 crc kubenswrapper[4852]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Jan 29 11:04:38 crc kubenswrapper[4852]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-k8pcs" message=< Jan 29 11:04:38 crc kubenswrapper[4852]: Exiting ovsdb-server (5) [ OK ] Jan 29 11:04:38 crc kubenswrapper[4852]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Jan 29 11:04:38 crc kubenswrapper[4852]: + source /usr/local/bin/container-scripts/functions Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNBridge=br-int Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNRemote=tcp:localhost:6642 Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNEncapType=geneve Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNAvailabilityZones= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ EnableChassisAsGateway=true Jan 29 11:04:38 crc kubenswrapper[4852]: ++ PhysicalNetworks= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNHostName= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ DB_FILE=/etc/openvswitch/conf.db Jan 29 11:04:38 crc kubenswrapper[4852]: ++ ovs_dir=/var/lib/openvswitch Jan 29 11:04:38 crc kubenswrapper[4852]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Jan 29 11:04:38 crc kubenswrapper[4852]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Jan 29 11:04:38 crc kubenswrapper[4852]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 29 11:04:38 crc kubenswrapper[4852]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 29 11:04:38 crc kubenswrapper[4852]: + sleep 0.5 Jan 29 11:04:38 crc kubenswrapper[4852]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 29 11:04:38 crc kubenswrapper[4852]: + cleanup_ovsdb_server_semaphore Jan 29 11:04:38 crc kubenswrapper[4852]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 29 11:04:38 crc kubenswrapper[4852]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Jan 29 11:04:38 crc kubenswrapper[4852]: > Jan 29 11:04:38 crc kubenswrapper[4852]: E0129 11:04:38.920052 4852 kuberuntime_container.go:691] "PreStop hook failed" err=< Jan 29 11:04:38 crc kubenswrapper[4852]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Jan 29 11:04:38 crc kubenswrapper[4852]: + source /usr/local/bin/container-scripts/functions Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNBridge=br-int Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNRemote=tcp:localhost:6642 Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNEncapType=geneve Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNAvailabilityZones= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ EnableChassisAsGateway=true Jan 29 11:04:38 crc kubenswrapper[4852]: ++ PhysicalNetworks= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ OVNHostName= Jan 29 11:04:38 crc kubenswrapper[4852]: ++ DB_FILE=/etc/openvswitch/conf.db Jan 29 11:04:38 crc kubenswrapper[4852]: ++ ovs_dir=/var/lib/openvswitch Jan 29 11:04:38 crc kubenswrapper[4852]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Jan 29 11:04:38 crc kubenswrapper[4852]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Jan 29 11:04:38 crc kubenswrapper[4852]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 29 11:04:38 crc kubenswrapper[4852]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 29 11:04:38 crc kubenswrapper[4852]: + sleep 0.5 Jan 29 11:04:38 crc kubenswrapper[4852]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 29 11:04:38 crc kubenswrapper[4852]: + cleanup_ovsdb_server_semaphore Jan 29 11:04:38 crc kubenswrapper[4852]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 29 11:04:38 crc kubenswrapper[4852]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Jan 29 11:04:38 crc kubenswrapper[4852]: > pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" containerID="cri-o://7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.920094 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" containerID="cri-o://7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" gracePeriod=29 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.928130 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.943474 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-jqwsq"] Jan 29 11:04:38 crc kubenswrapper[4852]: E0129 11:04:38.953690 4852 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 11:04:38 crc kubenswrapper[4852]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 29 11:04:38 crc kubenswrapper[4852]: Jan 29 11:04:38 crc kubenswrapper[4852]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 11:04:38 crc kubenswrapper[4852]: Jan 29 11:04:38 crc kubenswrapper[4852]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 11:04:38 crc kubenswrapper[4852]: Jan 29 11:04:38 crc kubenswrapper[4852]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 11:04:38 crc kubenswrapper[4852]: Jan 29 11:04:38 crc kubenswrapper[4852]: if [ -n "neutron" ]; then Jan 29 11:04:38 crc kubenswrapper[4852]: GRANT_DATABASE="neutron" Jan 29 11:04:38 crc kubenswrapper[4852]: else Jan 29 11:04:38 crc kubenswrapper[4852]: GRANT_DATABASE="*" Jan 29 11:04:38 crc kubenswrapper[4852]: fi Jan 29 11:04:38 crc kubenswrapper[4852]: Jan 29 11:04:38 crc kubenswrapper[4852]: # going for maximum compatibility here: Jan 29 11:04:38 crc kubenswrapper[4852]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 11:04:38 crc kubenswrapper[4852]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 11:04:38 crc kubenswrapper[4852]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 11:04:38 crc kubenswrapper[4852]: # support updates Jan 29 11:04:38 crc kubenswrapper[4852]: Jan 29 11:04:38 crc kubenswrapper[4852]: $MYSQL_CMD < logger="UnhandledError" Jan 29 11:04:38 crc kubenswrapper[4852]: E0129 11:04:38.956760 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"neutron-db-secret\\\" not found\"" pod="openstack/neutron-4bcf-account-create-update-q7t5b" podUID="6ff8124a-5795-45b2-ae2c-18f779e7da1e" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.957680 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.967390 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.980180 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.980215 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.981559 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-66d6b946b9-8qp8x"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.981789 4852 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-worker-66d6b946b9-8qp8x" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker-log" containerID="cri-o://a1df71499e1fd5786e8f8ce1f972f8c5cdb23d2e76c158581681aaf76b9972b9" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.982077 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-66d6b946b9-8qp8x" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker" containerID="cri-o://8366b4f53588b202defa86ab08279ed8a31501c51a1b057517aab806551203c0" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.989257 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-6cc75d7564-wfkl2"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.989593 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener-log" containerID="cri-o://430bb8180e977735b3830ae9b80ddb4c04224564e891d55f48ab0ca2914dbb58" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.989640 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener" containerID="cri-o://aa5685bc516d6db550494e4d88ddd8fe48813b31062f4470210e7804ed6c4c11" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: W0129 11:04:38.995306 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77915ad3_b974_4266_ac52_4f81873d744c.slice/crio-5749448c6e980a1e59aa2b47002608b3b2daab43b1f748ef9cdd1245c77e00e7 WatchSource:0}: Error finding container 5749448c6e980a1e59aa2b47002608b3b2daab43b1f748ef9cdd1245c77e00e7: Status 404 returned error can't find the container with id 5749448c6e980a1e59aa2b47002608b3b2daab43b1f748ef9cdd1245c77e00e7 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.997189 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-588c766876-422z6"] Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.997369 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-588c766876-422z6" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api-log" containerID="cri-o://32ee64127b13aab5bf090c833967ae28b8675a965dc8eb24e8e5d01c8cd166f4" gracePeriod=30 Jan 29 11:04:38 crc kubenswrapper[4852]: I0129 11:04:38.997687 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-588c766876-422z6" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api" containerID="cri-o://02a21826da03d176e3af7a859639e921f42ebe93b8e3d176115585c9b0fb9752" gracePeriod=30 Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.000113 4852 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 11:04:39 crc kubenswrapper[4852]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: 
export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: if [ -n "glance" ]; then Jan 29 11:04:39 crc kubenswrapper[4852]: GRANT_DATABASE="glance" Jan 29 11:04:39 crc kubenswrapper[4852]: else Jan 29 11:04:39 crc kubenswrapper[4852]: GRANT_DATABASE="*" Jan 29 11:04:39 crc kubenswrapper[4852]: fi Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: # going for maximum compatibility here: Jan 29 11:04:39 crc kubenswrapper[4852]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 11:04:39 crc kubenswrapper[4852]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 11:04:39 crc kubenswrapper[4852]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 11:04:39 crc kubenswrapper[4852]: # support updates Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: $MYSQL_CMD < logger="UnhandledError" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.001990 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-cfee-account-create-update-8f8sh" podUID="77915ad3-b974-4266-ac52-4f81873d744c" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.004940 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-v5wld"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.027427 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.027928 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" containerName="nova-cell0-conductor-conductor" containerID="cri-o://709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" gracePeriod=30 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.030768 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.030796 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.055057 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" containerName="rabbitmq" containerID="cri-o://f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6" gracePeriod=604800 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.070704 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "b2c66fcd-07fb-42ea-8176-77a4627b3886" (UID: "b2c66fcd-07fb-42ea-8176-77a4627b3886"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.073880 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-v5wld"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.080703 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-nt5bd"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.083236 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.083269 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c66fcd-07fb-42ea-8176-77a4627b3886-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.083281 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.087695 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-nt5bd"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.092349 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.092845 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="e113c351-f17d-477e-b671-0510cd03c0b0" containerName="nova-cell1-conductor-conductor" containerID="cri-o://ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3" gracePeriod=30 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.100336 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.101248 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="932e2969-4638-44c5-94f3-bb07c5fd4a8f" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b" gracePeriod=30 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.101824 4852 generic.go:334] "Generic (PLEG): container finished" podID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerID="82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6" exitCode=0 Jan 29 
11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.101871 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f4fbff985-ww2n4" event={"ID":"2683963a-32cd-488b-84f8-9222fc66a2b2","Type":"ContainerDied","Data":"82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.104601 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "1be50193-e246-46b3-bc61-974f6b01b6e7" (UID: "1be50193-e246-46b3-bc61-974f6b01b6e7"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.109604 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4bcf-account-create-update-q7t5b"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.114362 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-cfee-account-create-update-8f8sh"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.125180 4852 generic.go:334] "Generic (PLEG): container finished" podID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerID="38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5" exitCode=143 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.125326 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e","Type":"ContainerDied","Data":"38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.147232 4852 generic.go:334] "Generic (PLEG): container finished" podID="e967d95c-8de4-4167-82ef-1b32f6026476" containerID="5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351" exitCode=143 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.147515 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e967d95c-8de4-4167-82ef-1b32f6026476","Type":"ContainerDied","Data":"5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.158276 4852 generic.go:334] "Generic (PLEG): container finished" podID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerID="d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb" exitCode=143 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.158363 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2","Type":"ContainerDied","Data":"d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.171837 4852 generic.go:334] "Generic (PLEG): container finished" podID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerID="907e93ef39a4a6500cbf037ce8f0712f50de707858f25b39621cba63db775ba4" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.171937 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" event={"ID":"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9","Type":"ContainerDied","Data":"907e93ef39a4a6500cbf037ce8f0712f50de707858f25b39621cba63db775ba4"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.171971 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" 
event={"ID":"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9","Type":"ContainerDied","Data":"ec03b195d85f021c20a6e6bf903bc579aae9cdcf0cf2d2282e66b86785fc6a2d"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.171986 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec03b195d85f021c20a6e6bf903bc579aae9cdcf0cf2d2282e66b86785fc6a2d" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.173916 4852 generic.go:334] "Generic (PLEG): container finished" podID="20516ac5-463a-4d2c-a442-d74254876ddf" containerID="9e2fbd4b9daaa437f8f321cc9702964e5924353cd1b5d954ec556bcd5e7b8cfd" exitCode=137 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.173989 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64d65ecc7347d41002e70f40763ad11f097d63e35d433683aa217affd647c32c" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.179550 4852 generic.go:334] "Generic (PLEG): container finished" podID="65f47530-2db1-46a2-84fa-dde28af57083" containerID="6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9" exitCode=143 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.179615 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c4b86c744-rrhm8" event={"ID":"65f47530-2db1-46a2-84fa-dde28af57083","Type":"ContainerDied","Data":"6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.181616 4852 generic.go:334] "Generic (PLEG): container finished" podID="232868d3-4c67-4820-b75c-e90009acf440" containerID="d75357c4635ac4b27b487dfd891bbe8e4a30c70a0f91635d4c51f8b3a4c92c2b" exitCode=143 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.181673 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"232868d3-4c67-4820-b75c-e90009acf440","Type":"ContainerDied","Data":"d75357c4635ac4b27b487dfd891bbe8e4a30c70a0f91635d4c51f8b3a4c92c2b"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.182443 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" event={"ID":"5185a393-4eff-496b-bd08-a8a91ada2a17","Type":"ContainerStarted","Data":"80cb150bb3c91969e2dfb2f5372772dcbf95f7dd36544c2141f10c822aa573e1"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.182938 4852 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/nova-cell1-5040-account-create-update-jmf7z" secret="" err="secret \"galera-openstack-cell1-dockercfg-7l7vf\" not found" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.184747 4852 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 11:04:39 crc kubenswrapper[4852]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: if [ -n "nova_cell1" ]; then Jan 29 11:04:39 crc kubenswrapper[4852]: GRANT_DATABASE="nova_cell1" Jan 29 11:04:39 crc kubenswrapper[4852]: else Jan 29 11:04:39 crc kubenswrapper[4852]: GRANT_DATABASE="*" Jan 29 11:04:39 crc kubenswrapper[4852]: fi Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: # going for maximum compatibility here: Jan 29 11:04:39 crc kubenswrapper[4852]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 11:04:39 crc kubenswrapper[4852]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 11:04:39 crc kubenswrapper[4852]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 11:04:39 crc kubenswrapper[4852]: # support updates Jan 29 11:04:39 crc kubenswrapper[4852]: Jan 29 11:04:39 crc kubenswrapper[4852]: $MYSQL_CMD < logger="UnhandledError" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.185156 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1be50193-e246-46b3-bc61-974f6b01b6e7-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.185808 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell1-db-secret\\\" not found\"" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" podUID="5185a393-4eff-496b-bd08-a8a91ada2a17" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.186601 4852 generic.go:334] "Generic (PLEG): container finished" podID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.186657 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerDied","Data":"7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201295 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="ce1f7c6d351cc99e3313d53f3d8f5133e907d6c87aab097b279a18222b571462" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201330 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" 
containerID="2fb8dded90eb3884703a4aa309816c0eff0cdf02427346e9d34e49253bf4d662" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201344 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"ce1f7c6d351cc99e3313d53f3d8f5133e907d6c87aab097b279a18222b571462"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201341 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="907bf322df4410dc6b7ff975343a603805ddab5bdea051b8b9a9717eb895ca80" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201391 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"2fb8dded90eb3884703a4aa309816c0eff0cdf02427346e9d34e49253bf4d662"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201396 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="45528cdeb850649069e891209d9ca38b5ce8b5d0110cd02108c6b5f6abe281fc" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201405 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"907bf322df4410dc6b7ff975343a603805ddab5bdea051b8b9a9717eb895ca80"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201408 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="1e2b102b24f601e91c5e39ae16f39b46498693458c4fead72ba2b77aa8d49771" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201414 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"45528cdeb850649069e891209d9ca38b5ce8b5d0110cd02108c6b5f6abe281fc"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201420 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="dde3e5fe58352e0cc69a3b45408e08dda15923a3ecec816e83910494c6735af4" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201424 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"1e2b102b24f601e91c5e39ae16f39b46498693458c4fead72ba2b77aa8d49771"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201437 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"dde3e5fe58352e0cc69a3b45408e08dda15923a3ecec816e83910494c6735af4"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201445 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"1da31bf3d6d70a3d5937cf86e2d07be3913f158a9179ef5de6c23c100b7e5517"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201428 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="1da31bf3d6d70a3d5937cf86e2d07be3913f158a9179ef5de6c23c100b7e5517" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201775 4852 generic.go:334] 
"Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="91fe6b60918e96d60b8de169eade7e1727fc0ccc381141ea808831c289639ab8" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201815 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="7d276e426302c43e751c21a713a36f854cb3218920539f9fed9da38f45d520ce" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201828 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="118a6805e58988df9a38c6169f10a35d7f949be36a831d95306796e4b1348a45" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201836 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="ac4d599adbc91bf89af64ad4f64d8683dca701a7383cd3db396529b7ca9ceeec" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201844 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="e7228e8da4a774db3b5290c30cdafed9405b8034f11e829f2c3ac803d946e4c3" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201861 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="7d958b8db8a90d2d6554c92f9750a01a932e5397d4686c338494a75d5e717c07" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.201870 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="761316244eb0a26173100890d918cc7c1799abc67c0ec48e5c29ebc05dc7ed29" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.202038 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"91fe6b60918e96d60b8de169eade7e1727fc0ccc381141ea808831c289639ab8"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.202107 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"7d276e426302c43e751c21a713a36f854cb3218920539f9fed9da38f45d520ce"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.202123 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"118a6805e58988df9a38c6169f10a35d7f949be36a831d95306796e4b1348a45"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.202164 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"ac4d599adbc91bf89af64ad4f64d8683dca701a7383cd3db396529b7ca9ceeec"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.202184 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"e7228e8da4a774db3b5290c30cdafed9405b8034f11e829f2c3ac803d946e4c3"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.202198 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"7d958b8db8a90d2d6554c92f9750a01a932e5397d4686c338494a75d5e717c07"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 
11:04:39.202210 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"761316244eb0a26173100890d918cc7c1799abc67c0ec48e5c29ebc05dc7ed29"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.206019 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cjmvw" event={"ID":"4dab3827-3a1d-45f0-a021-d0ef66b37f0a","Type":"ContainerStarted","Data":"8d79ffcbd42e91fe782c0afbe62ab3812addcd453e6264aad0f1a2ed4898b268"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.209666 4852 generic.go:334] "Generic (PLEG): container finished" podID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerID="115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f" exitCode=0 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.209740 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b7fa8168-5031-44e4-9e06-03b2cda941f5","Type":"ContainerDied","Data":"115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.217958 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rxcgs_1d4860e9-e690-409f-bc12-86a1c51e6db1/openstack-network-exporter/0.log" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.218110 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rxcgs" event={"ID":"1d4860e9-e690-409f-bc12-86a1c51e6db1","Type":"ContainerDied","Data":"32b63bf47d79040f66ea22af0dbffbc79d1030aa465d379aeb26fa0baa77f8df"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.218149 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32b63bf47d79040f66ea22af0dbffbc79d1030aa465d379aeb26fa0baa77f8df" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.220269 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-cfee-account-create-update-8f8sh" event={"ID":"77915ad3-b974-4266-ac52-4f81873d744c","Type":"ContainerStarted","Data":"5749448c6e980a1e59aa2b47002608b3b2daab43b1f748ef9cdd1245c77e00e7"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.225338 4852 generic.go:334] "Generic (PLEG): container finished" podID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerID="ce3f6c95a078dc3bea52fa656715a9c015827b3d5a2ef4d87997ac240eeab0e7" exitCode=143 Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.225371 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e","Type":"ContainerDied","Data":"ce3f6c95a078dc3bea52fa656715a9c015827b3d5a2ef4d87997ac240eeab0e7"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.245515 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_1be50193-e246-46b3-bc61-974f6b01b6e7/ovsdbserver-nb/0.log" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.245606 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1be50193-e246-46b3-bc61-974f6b01b6e7","Type":"ContainerDied","Data":"ed5f205a6c1838d88c524adbcf1bc4a5c44cf83cda919ceb06f9b0947f601d7b"} Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.245642 4852 scope.go:117] "RemoveContainer" containerID="9caeeb9cc777b568b828a8350ae3044f5968f1db84c2eef74d143f409773f59a" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.245811 4852 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.257019 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.257732 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4bcf-account-create-update-q7t5b" event={"ID":"6ff8124a-5795-45b2-ae2c-18f779e7da1e","Type":"ContainerStarted","Data":"9b5ed2a0f37e1850510a124fa2fc8b001a4d939c84722fbad292b68526b91629"} Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.286798 4852 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.286867 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts podName:5185a393-4eff-496b-bd08-a8a91ada2a17 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:39.786849611 +0000 UTC m=+1377.004180745 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts") pod "nova-cell1-5040-account-create-update-jmf7z" (UID: "5185a393-4eff-496b-bd08-a8a91ada2a17") : configmap "openstack-cell1-scripts" not found Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.311868 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.328423 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.333865 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rxcgs_1d4860e9-e690-409f-bc12-86a1c51e6db1/openstack-network-exporter/0.log" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.334061 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.336697 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.371548 4852 scope.go:117] "RemoveContainer" containerID="2efbb95f790b03b0a620fc69f9cb3727ff67bc70ca3bd32306962a43832948f8" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.386993 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.389764 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-metrics-certs-tls-certs\") pod \"1d4860e9-e690-409f-bc12-86a1c51e6db1\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.389849 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovn-rundir\") pod \"1d4860e9-e690-409f-bc12-86a1c51e6db1\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.389879 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovs-rundir\") pod \"1d4860e9-e690-409f-bc12-86a1c51e6db1\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.389908 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-svc\") pod \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.389967 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kf8nq\" (UniqueName: \"kubernetes.io/projected/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-kube-api-access-kf8nq\") pod \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390069 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "1d4860e9-e690-409f-bc12-86a1c51e6db1" (UID: "1d4860e9-e690-409f-bc12-86a1c51e6db1"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390082 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-nb\") pod \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390106 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-sb\") pod \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390148 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d4860e9-e690-409f-bc12-86a1c51e6db1-config\") pod \"1d4860e9-e690-409f-bc12-86a1c51e6db1\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390188 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-combined-ca-bundle\") pod \"1d4860e9-e690-409f-bc12-86a1c51e6db1\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390225 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkls2\" (UniqueName: \"kubernetes.io/projected/1d4860e9-e690-409f-bc12-86a1c51e6db1-kube-api-access-hkls2\") pod \"1d4860e9-e690-409f-bc12-86a1c51e6db1\" (UID: \"1d4860e9-e690-409f-bc12-86a1c51e6db1\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390280 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-swift-storage-0\") pod \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.390342 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-config\") pod \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\" (UID: \"e8e8bb0a-9bde-4d34-9306-60c6223cf8b9\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.391305 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.391658 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "1d4860e9-e690-409f-bc12-86a1c51e6db1" (UID: "1d4860e9-e690-409f-bc12-86a1c51e6db1"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.407104 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d4860e9-e690-409f-bc12-86a1c51e6db1-config" (OuterVolumeSpecName: "config") pod "1d4860e9-e690-409f-bc12-86a1c51e6db1" (UID: "1d4860e9-e690-409f-bc12-86a1c51e6db1"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.407183 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.418651 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.420857 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-kube-api-access-kf8nq" (OuterVolumeSpecName: "kube-api-access-kf8nq") pod "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" (UID: "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9"). InnerVolumeSpecName "kube-api-access-kf8nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.424465 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d4860e9-e690-409f-bc12-86a1c51e6db1-kube-api-access-hkls2" (OuterVolumeSpecName: "kube-api-access-hkls2") pod "1d4860e9-e690-409f-bc12-86a1c51e6db1" (UID: "1d4860e9-e690-409f-bc12-86a1c51e6db1"). InnerVolumeSpecName "kube-api-access-hkls2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.447461 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d4860e9-e690-409f-bc12-86a1c51e6db1" (UID: "1d4860e9-e690-409f-bc12-86a1c51e6db1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.480946 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0333efd0-887f-4c8f-a25f-b6e7f56068c0" path="/var/lib/kubelet/pods/0333efd0-887f-4c8f-a25f-b6e7f56068c0/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.481893 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="075426ec-6034-40e1-9449-bfec69b8e991" path="/var/lib/kubelet/pods/075426ec-6034-40e1-9449-bfec69b8e991/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.482661 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09e692ef-fb62-44d3-8b88-09fa15eaae6f" path="/var/lib/kubelet/pods/09e692ef-fb62-44d3-8b88-09fa15eaae6f/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.483520 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e845226-3c75-48d0-9fbc-a22e885dac4d" path="/var/lib/kubelet/pods/0e845226-3c75-48d0-9fbc-a22e885dac4d/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.484954 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" path="/var/lib/kubelet/pods/1be50193-e246-46b3-bc61-974f6b01b6e7/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.485749 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29200a98-1d5c-4122-bfba-d66f4b12b5e0" path="/var/lib/kubelet/pods/29200a98-1d5c-4122-bfba-d66f4b12b5e0/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.486947 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3370d9c7-8c64-443a-82fe-b03172ce44e4" path="/var/lib/kubelet/pods/3370d9c7-8c64-443a-82fe-b03172ce44e4/volumes" Jan 29 11:04:39 
crc kubenswrapper[4852]: I0129 11:04:39.488090 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="487b0984-bae1-4730-bfd0-afe920ec974e" path="/var/lib/kubelet/pods/487b0984-bae1-4730-bfd0-afe920ec974e/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.490605 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f5dfb94-d484-48d3-ab84-70c647e30d2e" path="/var/lib/kubelet/pods/6f5dfb94-d484-48d3-ab84-70c647e30d2e/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.492267 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8071177d-5f88-4cda-a3a8-b36eb0807a64" path="/var/lib/kubelet/pods/8071177d-5f88-4cda-a3a8-b36eb0807a64/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.492297 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-combined-ca-bundle\") pod \"20516ac5-463a-4d2c-a442-d74254876ddf\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.492438 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72nfw\" (UniqueName: \"kubernetes.io/projected/20516ac5-463a-4d2c-a442-d74254876ddf-kube-api-access-72nfw\") pod \"20516ac5-463a-4d2c-a442-d74254876ddf\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.492492 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config\") pod \"20516ac5-463a-4d2c-a442-d74254876ddf\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.492551 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config-secret\") pod \"20516ac5-463a-4d2c-a442-d74254876ddf\" (UID: \"20516ac5-463a-4d2c-a442-d74254876ddf\") " Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.493796 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d4860e9-e690-409f-bc12-86a1c51e6db1-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.493821 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.493835 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkls2\" (UniqueName: \"kubernetes.io/projected/1d4860e9-e690-409f-bc12-86a1c51e6db1-kube-api-access-hkls2\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.493846 4852 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1d4860e9-e690-409f-bc12-86a1c51e6db1-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.493857 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kf8nq\" (UniqueName: \"kubernetes.io/projected/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-kube-api-access-kf8nq\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 
crc kubenswrapper[4852]: I0129 11:04:39.497211 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eb503ba-654f-4997-8581-d2ea619a7b68" path="/var/lib/kubelet/pods/8eb503ba-654f-4997-8581-d2ea619a7b68/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.497815 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dcddeb0-9a29-43ba-a4a3-50c920d2603f" path="/var/lib/kubelet/pods/9dcddeb0-9a29-43ba-a4a3-50c920d2603f/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.498313 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a25d15a2-cada-4765-bd31-40f3e42a0edb" path="/var/lib/kubelet/pods/a25d15a2-cada-4765-bd31-40f3e42a0edb/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.502362 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" path="/var/lib/kubelet/pods/b2c66fcd-07fb-42ea-8176-77a4627b3886/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.502761 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20516ac5-463a-4d2c-a442-d74254876ddf-kube-api-access-72nfw" (OuterVolumeSpecName: "kube-api-access-72nfw") pod "20516ac5-463a-4d2c-a442-d74254876ddf" (UID: "20516ac5-463a-4d2c-a442-d74254876ddf"). InnerVolumeSpecName "kube-api-access-72nfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.515525 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" path="/var/lib/kubelet/pods/b99e639e-b687-4552-bfa0-ed4391283aaf/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.516542 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-config" (OuterVolumeSpecName: "config") pod "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" (UID: "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.528086 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d010e854-5e87-4a27-b194-c3ced771d680" path="/var/lib/kubelet/pods/d010e854-5e87-4a27-b194-c3ced771d680/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.528785 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e67bd911-5506-4636-8b80-9f5a73e0c99f" path="/var/lib/kubelet/pods/e67bd911-5506-4636-8b80-9f5a73e0c99f/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.529438 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8062643-e2e7-45e4-aab3-e39d07b2946c" path="/var/lib/kubelet/pods/e8062643-e2e7-45e4-aab3-e39d07b2946c/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.536619 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8eaa1c2-1b96-43b1-ba67-522014312ee6" path="/var/lib/kubelet/pods/e8eaa1c2-1b96-43b1-ba67-522014312ee6/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.537513 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f" path="/var/lib/kubelet/pods/ec7f5eee-a5c1-49b6-9eeb-69cc72356f0f/volumes" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.545147 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" (UID: "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.550349 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.550658 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" (UID: "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.552748 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.554234 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.554287 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" containerName="nova-cell0-conductor-conductor" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.565089 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" (UID: "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.588621 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.592477 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.594145 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.595450 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72nfw\" (UniqueName: \"kubernetes.io/projected/20516ac5-463a-4d2c-a442-d74254876ddf-kube-api-access-72nfw\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.595472 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.595481 4852 reconciler_common.go:293] "Volume detached 
for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.595490 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.595500 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.595850 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" (UID: "e8e8bb0a-9bde-4d34-9306-60c6223cf8b9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.596322 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" containerName="galera" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.635674 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20516ac5-463a-4d2c-a442-d74254876ddf" (UID: "20516ac5-463a-4d2c-a442-d74254876ddf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.640694 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "20516ac5-463a-4d2c-a442-d74254876ddf" (UID: "20516ac5-463a-4d2c-a442-d74254876ddf"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.662528 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "20516ac5-463a-4d2c-a442-d74254876ddf" (UID: "20516ac5-463a-4d2c-a442-d74254876ddf"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.670516 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1d4860e9-e690-409f-bc12-86a1c51e6db1" (UID: "1d4860e9-e690-409f-bc12-86a1c51e6db1"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.706993 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.707035 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.707045 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/20516ac5-463a-4d2c-a442-d74254876ddf-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.707055 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.707064 4852 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d4860e9-e690-409f-bc12-86a1c51e6db1-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.761923 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.768775 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.775748 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.775803 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerName="nova-scheduler-scheduler" Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.809119 4852 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 11:04:39 crc kubenswrapper[4852]: E0129 11:04:39.809194 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts podName:5185a393-4eff-496b-bd08-a8a91ada2a17 nodeName:}" failed. 
No retries permitted until 2026-01-29 11:04:40.809179412 +0000 UTC m=+1378.026510546 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts") pod "nova-cell1-5040-account-create-update-jmf7z" (UID: "5185a393-4eff-496b-bd08-a8a91ada2a17") : configmap "openstack-cell1-scripts" not found Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.893545 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.927247 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.942557 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:39 crc kubenswrapper[4852]: I0129 11:04:39.967748 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.013445 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.018870 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-config-data\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.018917 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data\") pod \"b7fa8168-5031-44e4-9e06-03b2cda941f5\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.018941 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-public-tls-certs\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.018966 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-run-httpd\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.018986 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b7fa8168-5031-44e4-9e06-03b2cda941f5-etc-machine-id\") pod \"b7fa8168-5031-44e4-9e06-03b2cda941f5\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019015 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stw64\" (UniqueName: \"kubernetes.io/projected/6ff8124a-5795-45b2-ae2c-18f779e7da1e-kube-api-access-stw64\") pod \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019044 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-log-httpd\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019080 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ff8124a-5795-45b2-ae2c-18f779e7da1e-operator-scripts\") pod \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\" (UID: \"6ff8124a-5795-45b2-ae2c-18f779e7da1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019121 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data-custom\") pod \"b7fa8168-5031-44e4-9e06-03b2cda941f5\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019162 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-combined-ca-bundle\") pod \"b7fa8168-5031-44e4-9e06-03b2cda941f5\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019202 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbslw\" (UniqueName: \"kubernetes.io/projected/b7fa8168-5031-44e4-9e06-03b2cda941f5-kube-api-access-vbslw\") pod \"b7fa8168-5031-44e4-9e06-03b2cda941f5\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019217 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-scripts\") pod \"b7fa8168-5031-44e4-9e06-03b2cda941f5\" (UID: \"b7fa8168-5031-44e4-9e06-03b2cda941f5\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019237 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78zt7\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-kube-api-access-78zt7\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019255 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zndc8\" (UniqueName: \"kubernetes.io/projected/77915ad3-b974-4266-ac52-4f81873d744c-kube-api-access-zndc8\") pod \"77915ad3-b974-4266-ac52-4f81873d744c\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019310 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77915ad3-b974-4266-ac52-4f81873d744c-operator-scripts\") pod \"77915ad3-b974-4266-ac52-4f81873d744c\" (UID: \"77915ad3-b974-4266-ac52-4f81873d744c\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019332 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-internal-tls-certs\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: 
I0129 11:04:40.019349 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-combined-ca-bundle\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019416 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-etc-swift\") pod \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\" (UID: \"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019421 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019764 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.019824 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.019864 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data podName:f84fb26d-e835-4d75-95d5-695b6e033bb7 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:44.019852145 +0000 UTC m=+1381.237183279 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7") : configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.019950 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7fa8168-5031-44e4-9e06-03b2cda941f5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b7fa8168-5031-44e4-9e06-03b2cda941f5" (UID: "b7fa8168-5031-44e4-9e06-03b2cda941f5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.020210 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77915ad3-b974-4266-ac52-4f81873d744c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "77915ad3-b974-4266-ac52-4f81873d744c" (UID: "77915ad3-b974-4266-ac52-4f81873d744c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.022617 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.025514 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7fa8168-5031-44e4-9e06-03b2cda941f5-kube-api-access-vbslw" (OuterVolumeSpecName: "kube-api-access-vbslw") pod "b7fa8168-5031-44e4-9e06-03b2cda941f5" (UID: "b7fa8168-5031-44e4-9e06-03b2cda941f5"). InnerVolumeSpecName "kube-api-access-vbslw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.026645 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ff8124a-5795-45b2-ae2c-18f779e7da1e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6ff8124a-5795-45b2-ae2c-18f779e7da1e" (UID: "6ff8124a-5795-45b2-ae2c-18f779e7da1e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.026898 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-kube-api-access-78zt7" (OuterVolumeSpecName: "kube-api-access-78zt7") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "kube-api-access-78zt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.027855 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.029262 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ff8124a-5795-45b2-ae2c-18f779e7da1e-kube-api-access-stw64" (OuterVolumeSpecName: "kube-api-access-stw64") pod "6ff8124a-5795-45b2-ae2c-18f779e7da1e" (UID: "6ff8124a-5795-45b2-ae2c-18f779e7da1e"). InnerVolumeSpecName "kube-api-access-stw64". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.030369 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-scripts" (OuterVolumeSpecName: "scripts") pod "b7fa8168-5031-44e4-9e06-03b2cda941f5" (UID: "b7fa8168-5031-44e4-9e06-03b2cda941f5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.032217 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77915ad3-b974-4266-ac52-4f81873d744c-kube-api-access-zndc8" (OuterVolumeSpecName: "kube-api-access-zndc8") pod "77915ad3-b974-4266-ac52-4f81873d744c" (UID: "77915ad3-b974-4266-ac52-4f81873d744c"). InnerVolumeSpecName "kube-api-access-zndc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.041494 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b7fa8168-5031-44e4-9e06-03b2cda941f5" (UID: "b7fa8168-5031-44e4-9e06-03b2cda941f5"). 
InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.091820 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.094833 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.101748 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-config-data" (OuterVolumeSpecName: "config-data") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.116337 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7fa8168-5031-44e4-9e06-03b2cda941f5" (UID: "b7fa8168-5031-44e4-9e06-03b2cda941f5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.118268 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" (UID: "5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.120292 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-nova-novncproxy-tls-certs\") pod \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.120536 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-config-data\") pod \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.120616 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-vencrypt-tls-certs\") pod \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.120681 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-combined-ca-bundle\") pod \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.120794 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zrx6\" (UniqueName: \"kubernetes.io/projected/932e2969-4638-44c5-94f3-bb07c5fd4a8f-kube-api-access-7zrx6\") pod \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\" (UID: \"932e2969-4638-44c5-94f3-bb07c5fd4a8f\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121290 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121310 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121320 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b7fa8168-5031-44e4-9e06-03b2cda941f5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121474 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stw64\" (UniqueName: \"kubernetes.io/projected/6ff8124a-5795-45b2-ae2c-18f779e7da1e-kube-api-access-stw64\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121545 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121558 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ff8124a-5795-45b2-ae2c-18f779e7da1e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121567 4852 
reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121588 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121596 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbslw\" (UniqueName: \"kubernetes.io/projected/b7fa8168-5031-44e4-9e06-03b2cda941f5-kube-api-access-vbslw\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121605 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121613 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78zt7\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-kube-api-access-78zt7\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121621 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zndc8\" (UniqueName: \"kubernetes.io/projected/77915ad3-b974-4266-ac52-4f81873d744c-kube-api-access-zndc8\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121631 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77915ad3-b974-4266-ac52-4f81873d744c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121639 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121647 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.121654 4852 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.125597 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932e2969-4638-44c5-94f3-bb07c5fd4a8f-kube-api-access-7zrx6" (OuterVolumeSpecName: "kube-api-access-7zrx6") pod "932e2969-4638-44c5-94f3-bb07c5fd4a8f" (UID: "932e2969-4638-44c5-94f3-bb07c5fd4a8f"). InnerVolumeSpecName "kube-api-access-7zrx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.159745 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "932e2969-4638-44c5-94f3-bb07c5fd4a8f" (UID: "932e2969-4638-44c5-94f3-bb07c5fd4a8f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.160409 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-config-data" (OuterVolumeSpecName: "config-data") pod "932e2969-4638-44c5-94f3-bb07c5fd4a8f" (UID: "932e2969-4638-44c5-94f3-bb07c5fd4a8f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.163336 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data" (OuterVolumeSpecName: "config-data") pod "b7fa8168-5031-44e4-9e06-03b2cda941f5" (UID: "b7fa8168-5031-44e4-9e06-03b2cda941f5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.169202 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "932e2969-4638-44c5-94f3-bb07c5fd4a8f" (UID: "932e2969-4638-44c5-94f3-bb07c5fd4a8f"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.199443 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "932e2969-4638-44c5-94f3-bb07c5fd4a8f" (UID: "932e2969-4638-44c5-94f3-bb07c5fd4a8f"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.223122 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.223161 4852 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.223170 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.223178 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zrx6\" (UniqueName: \"kubernetes.io/projected/932e2969-4638-44c5-94f3-bb07c5fd4a8f-kube-api-access-7zrx6\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.223187 4852 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/932e2969-4638-44c5-94f3-bb07c5fd4a8f-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.223196 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fa8168-5031-44e4-9e06-03b2cda941f5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.270905 4852 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/glance-cfee-account-create-update-8f8sh" event={"ID":"77915ad3-b974-4266-ac52-4f81873d744c","Type":"ContainerDied","Data":"5749448c6e980a1e59aa2b47002608b3b2daab43b1f748ef9cdd1245c77e00e7"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.271008 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-cfee-account-create-update-8f8sh" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.275821 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerID="a1df71499e1fd5786e8f8ce1f972f8c5cdb23d2e76c158581681aaf76b9972b9" exitCode=143 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.275923 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66d6b946b9-8qp8x" event={"ID":"2e7fd5a0-1c61-420f-8da8-fc192c66730b","Type":"ContainerDied","Data":"a1df71499e1fd5786e8f8ce1f972f8c5cdb23d2e76c158581681aaf76b9972b9"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.278808 4852 generic.go:334] "Generic (PLEG): container finished" podID="932e2969-4638-44c5-94f3-bb07c5fd4a8f" containerID="b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b" exitCode=0 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.278868 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"932e2969-4638-44c5-94f3-bb07c5fd4a8f","Type":"ContainerDied","Data":"b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.278905 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"932e2969-4638-44c5-94f3-bb07c5fd4a8f","Type":"ContainerDied","Data":"dafd3398dea98e303a224b79b36a63e5d6fc98e9f36dd66db464d941d2aa66d7"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.278927 4852 scope.go:117] "RemoveContainer" containerID="b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.279060 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.286173 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4bcf-account-create-update-q7t5b" event={"ID":"6ff8124a-5795-45b2-ae2c-18f779e7da1e","Type":"ContainerDied","Data":"9b5ed2a0f37e1850510a124fa2fc8b001a4d939c84722fbad292b68526b91629"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.286260 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4bcf-account-create-update-q7t5b" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.294376 4852 generic.go:334] "Generic (PLEG): container finished" podID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerID="5cbe70cc3cf2a37eac0f5bb2c176c884849bf9df7328d79df88a293c62b5f0fd" exitCode=1 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.294446 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cjmvw" event={"ID":"4dab3827-3a1d-45f0-a021-d0ef66b37f0a","Type":"ContainerDied","Data":"5cbe70cc3cf2a37eac0f5bb2c176c884849bf9df7328d79df88a293c62b5f0fd"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.296865 4852 scope.go:117] "RemoveContainer" containerID="5cbe70cc3cf2a37eac0f5bb2c176c884849bf9df7328d79df88a293c62b5f0fd" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.300553 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.301133 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerID="430bb8180e977735b3830ae9b80ddb4c04224564e891d55f48ab0ca2914dbb58" exitCode=143 Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.301163 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.301191 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" event={"ID":"b8bf5d63-5181-4546-b5c8-94aaac228b1c","Type":"ContainerDied","Data":"430bb8180e977735b3830ae9b80ddb4c04224564e891d55f48ab0ca2914dbb58"} Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.301624 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.301653 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.306336 4852 generic.go:334] "Generic (PLEG): container finished" podID="e5832629-fcd6-441c-a349-f771c099f7b4" containerID="10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49" exitCode=0 Jan 29 
11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.306386 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5832629-fcd6-441c-a349-f771c099f7b4","Type":"ContainerDied","Data":"10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49"} Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.312634 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.319501 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.321058 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.321115 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.321507 4852 generic.go:334] "Generic (PLEG): container finished" podID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerID="a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca" exitCode=0 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.321536 4852 generic.go:334] "Generic (PLEG): container finished" podID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerID="43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb" exitCode=0 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.321615 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7c548544bc-nwvzz" event={"ID":"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e","Type":"ContainerDied","Data":"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.321649 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7c548544bc-nwvzz" event={"ID":"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e","Type":"ContainerDied","Data":"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.321663 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7c548544bc-nwvzz" event={"ID":"5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e","Type":"ContainerDied","Data":"9bd8b6960e80544de08980856dd9d13dda7069a61b4380365cdda0466cd063a6"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.321870 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7c548544bc-nwvzz" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.331188 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.331299 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data podName:1ab8189f-e95a-47b5-a130-5404901974e2 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:44.331274638 +0000 UTC m=+1381.548605772 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data") pod "rabbitmq-server-0" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2") : configmap "rabbitmq-config-data" not found Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.332793 4852 generic.go:334] "Generic (PLEG): container finished" podID="0bd5133c-258f-4725-9da4-17941a408af8" containerID="32ee64127b13aab5bf090c833967ae28b8675a965dc8eb24e8e5d01c8cd166f4" exitCode=143 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.332866 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-588c766876-422z6" event={"ID":"0bd5133c-258f-4725-9da4-17941a408af8","Type":"ContainerDied","Data":"32ee64127b13aab5bf090c833967ae28b8675a965dc8eb24e8e5d01c8cd166f4"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.345389 4852 scope.go:117] "RemoveContainer" containerID="b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.347829 4852 generic.go:334] "Generic (PLEG): container finished" podID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerID="fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11" exitCode=0 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.347951 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-z48fn" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.348311 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.348301 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b\": container with ID starting with b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b not found: ID does not exist" containerID="b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.347997 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b7fa8168-5031-44e4-9e06-03b2cda941f5","Type":"ContainerDied","Data":"fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.348373 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b"} err="failed to get container status \"b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b\": rpc error: code = NotFound desc = could not find container \"b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b\": container with ID starting with b23976b744f27e0eb501eb001182133c6bc970803dc52b9db21483dd259a345b not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.348404 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b7fa8168-5031-44e4-9e06-03b2cda941f5","Type":"ContainerDied","Data":"4e01e660e75412bf1c9a61121fcc3f86002df35ca14e523698899bbd731d8c9f"} Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.348411 4852 scope.go:117] "RemoveContainer" containerID="a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.348502 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-rxcgs" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.348887 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.376020 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-cfee-account-create-update-8f8sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.390647 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-cfee-account-create-update-8f8sh"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.398530 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.414475 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.431611 4852 scope.go:117] "RemoveContainer" containerID="43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.442367 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4bcf-account-create-update-q7t5b"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.491469 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4bcf-account-create-update-q7t5b"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.542759 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-rxcgs"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.542812 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-rxcgs"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.554568 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.560939 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.567678 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-z48fn"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.600766 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-z48fn"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.630704 4852 scope.go:117] "RemoveContainer" containerID="a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.634852 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca\": container with ID starting with a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca not found: ID does not exist" containerID="a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.634888 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca"} err="failed to get container status \"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca\": rpc error: code = NotFound desc = could not find container \"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca\": container with ID starting with a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.634914 4852 scope.go:117] 
"RemoveContainer" containerID="43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.637172 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7c548544bc-nwvzz"] Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.637749 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb\": container with ID starting with 43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb not found: ID does not exist" containerID="43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.637843 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb"} err="failed to get container status \"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb\": rpc error: code = NotFound desc = could not find container \"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb\": container with ID starting with 43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.637955 4852 scope.go:117] "RemoveContainer" containerID="a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.641695 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca"} err="failed to get container status \"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca\": rpc error: code = NotFound desc = could not find container \"a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca\": container with ID starting with a6546c94bfca150ebac86fca6a9c29eb655e4df9e8760b708d5856b4ec7186ca not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.641730 4852 scope.go:117] "RemoveContainer" containerID="43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.646045 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb"} err="failed to get container status \"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb\": rpc error: code = NotFound desc = could not find container \"43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb\": container with ID starting with 43143a6b31acece672ee8ab31b2f6447d12ad5cd90e17c80a7ae66a9fdf723fb not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.646110 4852 scope.go:117] "RemoveContainer" containerID="115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.679934 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-7c548544bc-nwvzz"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.744812 4852 scope.go:117] "RemoveContainer" containerID="fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.777343 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.785328 4852 scope.go:117] "RemoveContainer" containerID="115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.785858 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f\": container with ID starting with 115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f not found: ID does not exist" containerID="115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.785882 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f"} err="failed to get container status \"115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f\": rpc error: code = NotFound desc = could not find container \"115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f\": container with ID starting with 115e402fc9c979c29637177632656fce75bc562cb1b69454412eab483bb2648f not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.785902 4852 scope.go:117] "RemoveContainer" containerID="fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11" Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.786127 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11\": container with ID starting with fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11 not found: ID does not exist" containerID="fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.786150 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11"} err="failed to get container status \"fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11\": rpc error: code = NotFound desc = could not find container \"fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11\": container with ID starting with fc434a78ea99d756958773083108d99dfcfc84eb35d4ed223621590c18608f11 not found: ID does not exist" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.850865 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-kolla-config\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.850921 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-operator-scripts\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.850957 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-combined-ca-bundle\") pod 
\"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.851038 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.851144 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-galera-tls-certs\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.851187 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nkv9\" (UniqueName: \"kubernetes.io/projected/e5832629-fcd6-441c-a349-f771c099f7b4-kube-api-access-6nkv9\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.851248 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-generated\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.851312 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-default\") pod \"e5832629-fcd6-441c-a349-f771c099f7b4\" (UID: \"e5832629-fcd6-441c-a349-f771c099f7b4\") " Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.851852 4852 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 11:04:40 crc kubenswrapper[4852]: E0129 11:04:40.851919 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts podName:5185a393-4eff-496b-bd08-a8a91ada2a17 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:42.851890848 +0000 UTC m=+1380.069221982 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts") pod "nova-cell1-5040-account-create-update-jmf7z" (UID: "5185a393-4eff-496b-bd08-a8a91ada2a17") : configmap "openstack-cell1-scripts" not found Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.852821 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.853412 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). 
InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.854917 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.856629 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.870821 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5832629-fcd6-441c-a349-f771c099f7b4-kube-api-access-6nkv9" (OuterVolumeSpecName: "kube-api-access-6nkv9") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "kube-api-access-6nkv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.883499 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.890768 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.918335 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "e5832629-fcd6-441c-a349-f771c099f7b4" (UID: "e5832629-fcd6-441c-a349-f771c099f7b4"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.940708 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.940973 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-central-agent" containerID="cri-o://d816637dddc4a3042b5748abee104ea2ba5b25b7a148418ca6e26b6a15ced4dc" gracePeriod=30 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.941067 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="sg-core" containerID="cri-o://fce074b5c8c23ae7e6f2429e922beae69e6f5e352d49342fb4993c72c8e442cd" gracePeriod=30 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.941161 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="proxy-httpd" containerID="cri-o://3fba4011047b82105d23692b2e3c2f2018cbd57dcbd733255f9715aaff21b5b9" gracePeriod=30 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.941244 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-notification-agent" containerID="cri-o://b41dd88e530d0078eac9379893c00ca26e6907c39d4da306037e9f16ecf118b4" gracePeriod=30 Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.947484 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954499 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954531 4852 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954542 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nkv9\" (UniqueName: \"kubernetes.io/projected/e5832629-fcd6-441c-a349-f771c099f7b4-kube-api-access-6nkv9\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954551 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954559 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954567 4852 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954575 4852 reconciler_common.go:293] "Volume detached for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5832629-fcd6-441c-a349-f771c099f7b4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.954599 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5832629-fcd6-441c-a349-f771c099f7b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:40 crc kubenswrapper[4852]: I0129 11:04:40.991973 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.067640 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.068148 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f6b14960-4f7f-465e-8e53-96a14875878e" containerName="kube-state-metrics" containerID="cri-o://40738ebf6fcb34e9c873aac76a52d0310a88a7a7608cb768d663221a2e552a28" gracePeriod=30 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.068232 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5185a393-4eff-496b-bd08-a8a91ada2a17" (UID: "5185a393-4eff-496b-bd08-a8a91ada2a17"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.067746 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts\") pod \"5185a393-4eff-496b-bd08-a8a91ada2a17\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.068354 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5xps\" (UniqueName: \"kubernetes.io/projected/5185a393-4eff-496b-bd08-a8a91ada2a17-kube-api-access-j5xps\") pod \"5185a393-4eff-496b-bd08-a8a91ada2a17\" (UID: \"5185a393-4eff-496b-bd08-a8a91ada2a17\") " Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.069364 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.069382 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5185a393-4eff-496b-bd08-a8a91ada2a17-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.094383 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5185a393-4eff-496b-bd08-a8a91ada2a17-kube-api-access-j5xps" (OuterVolumeSpecName: "kube-api-access-j5xps") pod "5185a393-4eff-496b-bd08-a8a91ada2a17" (UID: "5185a393-4eff-496b-bd08-a8a91ada2a17"). InnerVolumeSpecName "kube-api-access-j5xps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.170532 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5xps\" (UniqueName: \"kubernetes.io/projected/5185a393-4eff-496b-bd08-a8a91ada2a17-kube-api-access-j5xps\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.172738 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmxm7"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.208187 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.208403 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="ce70759d-e206-41b9-b8d2-52a8ca74f67c" containerName="memcached" containerID="cri-o://444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530" gracePeriod=30 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.236886 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmxm7"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.250622 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f67f-account-create-update-lmc6d"] Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251081 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerName="dnsmasq-dns" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251098 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerName="dnsmasq-dns" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251120 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251128 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251146 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="cinder-scheduler" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251154 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="cinder-scheduler" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251168 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="ovn-northd" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251175 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="ovn-northd" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251189 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251196 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251216 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" 
containerName="galera" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251223 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" containerName="galera" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251237 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="probe" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251245 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="probe" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251258 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="ovsdbserver-sb" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251265 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="ovsdbserver-sb" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251282 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932e2969-4638-44c5-94f3-bb07c5fd4a8f" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251290 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="932e2969-4638-44c5-94f3-bb07c5fd4a8f" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251306 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-server" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251313 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-server" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251325 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251333 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251344 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d4860e9-e690-409f-bc12-86a1c51e6db1" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251353 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d4860e9-e690-409f-bc12-86a1c51e6db1" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251367 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerName="init" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251374 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerName="init" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251386 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" containerName="mysql-bootstrap" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251394 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" containerName="mysql-bootstrap" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251405 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="ovsdbserver-nb" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251413 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="ovsdbserver-nb" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.251428 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-httpd" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251436 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-httpd" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251652 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" containerName="dnsmasq-dns" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251671 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="ovn-northd" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251687 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b99e639e-b687-4552-bfa0-ed4391283aaf" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251697 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="932e2969-4638-44c5-94f3-bb07c5fd4a8f" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251708 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-httpd" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251722 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251735 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251747 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1be50193-e246-46b3-bc61-974f6b01b6e7" containerName="ovsdbserver-nb" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251762 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="probe" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251774 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" containerName="proxy-server" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251786 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" containerName="galera" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251795 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c66fcd-07fb-42ea-8176-77a4627b3886" containerName="ovsdbserver-sb" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251805 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d4860e9-e690-409f-bc12-86a1c51e6db1" containerName="openstack-network-exporter" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.251818 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" containerName="cinder-scheduler" Jan 29 11:04:41 crc 
kubenswrapper[4852]: I0129 11:04:41.252490 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.262313 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.287524 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmc6d"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.304345 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-jxll9"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.308334 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-jxll9"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.315007 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-78jgw"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.329061 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.346980 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c4f8f88d-2whzw"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.347248 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-c4f8f88d-2whzw" podUID="5611a301-79d2-4082-beba-c95db2a2bcad" containerName="keystone-api" containerID="cri-o://6309e1fbaf859c6c9e8f0f198002bde742b73cff0fec560c9a826574d4ae297a" gracePeriod=30 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.377571 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9zcr\" (UniqueName: \"kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.377645 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.379638 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmc6d"] Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.380296 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-w9zcr operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-f67f-account-create-update-lmc6d" podUID="5a59d8ce-ea12-4717-921f-4f7233a70488" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.396080 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-78jgw"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.408517 4852 generic.go:334] "Generic (PLEG): container finished" podID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerID="b6e25d81fe600efab5fce83e7dd9ac40fe5a0d9d6c2a7e7f3198f555b6740888" exitCode=1 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 
11:04:41.408624 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cjmvw" event={"ID":"4dab3827-3a1d-45f0-a021-d0ef66b37f0a","Type":"ContainerDied","Data":"b6e25d81fe600efab5fce83e7dd9ac40fe5a0d9d6c2a7e7f3198f555b6740888"} Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.408658 4852 scope.go:117] "RemoveContainer" containerID="5cbe70cc3cf2a37eac0f5bb2c176c884849bf9df7328d79df88a293c62b5f0fd" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.409185 4852 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-cjmvw" secret="" err="secret \"galera-openstack-dockercfg-q8bpf\" not found" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.409222 4852 scope.go:117] "RemoveContainer" containerID="b6e25d81fe600efab5fce83e7dd9ac40fe5a0d9d6c2a7e7f3198f555b6740888" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.409441 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CrashLoopBackOff: \"back-off 10s restarting failed container=mariadb-account-create-update pod=root-account-create-update-cjmvw_openstack(4dab3827-3a1d-45f0-a021-d0ef66b37f0a)\"" pod="openstack/root-account-create-update-cjmvw" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.412531 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-7nwwb"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.413270 4852 generic.go:334] "Generic (PLEG): container finished" podID="f6b14960-4f7f-465e-8e53-96a14875878e" containerID="40738ebf6fcb34e9c873aac76a52d0310a88a7a7608cb768d663221a2e552a28" exitCode=2 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.413505 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f6b14960-4f7f-465e-8e53-96a14875878e","Type":"ContainerDied","Data":"40738ebf6fcb34e9c873aac76a52d0310a88a7a7608cb768d663221a2e552a28"} Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.441387 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-7nwwb"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.444796 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5832629-fcd6-441c-a349-f771c099f7b4","Type":"ContainerDied","Data":"d9f64c02452c95ac177c99779642663791eff3e1562b0be3661fa1278f0f2ef6"} Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.444884 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.448026 4852 generic.go:334] "Generic (PLEG): container finished" podID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerID="fce074b5c8c23ae7e6f2429e922beae69e6f5e352d49342fb4993c72c8e442cd" exitCode=2 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.448106 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerDied","Data":"fce074b5c8c23ae7e6f2429e922beae69e6f5e352d49342fb4993c72c8e442cd"} Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.449334 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" event={"ID":"5185a393-4eff-496b-bd08-a8a91ada2a17","Type":"ContainerDied","Data":"80cb150bb3c91969e2dfb2f5372772dcbf95f7dd36544c2141f10c822aa573e1"} Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.449489 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5040-account-create-update-jmf7z" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.460711 4852 scope.go:117] "RemoveContainer" containerID="10dc1ae907c188abf4087e88e905f3480f1cc46d89aed28f115edb3b25733d49" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.479440 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d4860e9-e690-409f-bc12-86a1c51e6db1" path="/var/lib/kubelet/pods/1d4860e9-e690-409f-bc12-86a1c51e6db1/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.479513 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9zcr\" (UniqueName: \"kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.479560 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.479748 4852 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.479791 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts podName:5a59d8ce-ea12-4717-921f-4f7233a70488 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:41.979777315 +0000 UTC m=+1379.197108449 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts") pod "keystone-f67f-account-create-update-lmc6d" (UID: "5a59d8ce-ea12-4717-921f-4f7233a70488") : configmap "openstack-scripts" not found Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.480111 4852 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.480160 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts podName:4dab3827-3a1d-45f0-a021-d0ef66b37f0a nodeName:}" failed. No retries permitted until 2026-01-29 11:04:41.980150914 +0000 UTC m=+1379.197482048 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts") pod "root-account-create-update-cjmvw" (UID: "4dab3827-3a1d-45f0-a021-d0ef66b37f0a") : configmap "openstack-scripts" not found Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.480401 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.171:8776/healthcheck\": read tcp 10.217.0.2:35302->10.217.0.171:8776: read: connection reset by peer" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.480487 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20516ac5-463a-4d2c-a442-d74254876ddf" path="/var/lib/kubelet/pods/20516ac5-463a-4d2c-a442-d74254876ddf/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.481602 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a0e9617-a546-43d8-9ce8-7ff8e22004e8" path="/var/lib/kubelet/pods/4a0e9617-a546-43d8-9ce8-7ff8e22004e8/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.482327 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d3b9c66-5911-42ee-ad3d-c746b8aa5364" path="/var/lib/kubelet/pods/4d3b9c66-5911-42ee-ad3d-c746b8aa5364/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.483028 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e" path="/var/lib/kubelet/pods/5704c357-5a1a-4a0b-9ce9-aa1e5c550c1e/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.485298 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ff8124a-5795-45b2-ae2c-18f779e7da1e" path="/var/lib/kubelet/pods/6ff8124a-5795-45b2-ae2c-18f779e7da1e/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.485772 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7018648b-8a68-423d-9532-8222c0c4b6cc" path="/var/lib/kubelet/pods/7018648b-8a68-423d-9532-8222c0c4b6cc/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.485973 4852 projected.go:194] Error preparing data for projected volume kube-api-access-w9zcr for pod openstack/keystone-f67f-account-create-update-lmc6d: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 11:04:41 crc kubenswrapper[4852]: E0129 11:04:41.486102 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr podName:5a59d8ce-ea12-4717-921f-4f7233a70488 
nodeName:}" failed. No retries permitted until 2026-01-29 11:04:41.986080899 +0000 UTC m=+1379.203412033 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-w9zcr" (UniqueName: "kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr") pod "keystone-f67f-account-create-update-lmc6d" (UID: "5a59d8ce-ea12-4717-921f-4f7233a70488") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.486556 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="716ef279-3244-4dd1-8ae4-a0b17c4d119e" path="/var/lib/kubelet/pods/716ef279-3244-4dd1-8ae4-a0b17c4d119e/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.490709 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77915ad3-b974-4266-ac52-4f81873d744c" path="/var/lib/kubelet/pods/77915ad3-b974-4266-ac52-4f81873d744c/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.491367 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="932e2969-4638-44c5-94f3-bb07c5fd4a8f" path="/var/lib/kubelet/pods/932e2969-4638-44c5-94f3-bb07c5fd4a8f/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.492066 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7fa8168-5031-44e4-9e06-03b2cda941f5" path="/var/lib/kubelet/pods/b7fa8168-5031-44e4-9e06-03b2cda941f5/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.492921 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8e8bb0a-9bde-4d34-9306-60c6223cf8b9" path="/var/lib/kubelet/pods/e8e8bb0a-9bde-4d34-9306-60c6223cf8b9/volumes" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.494523 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-cjmvw"] Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.615294 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="galera" containerID="cri-o://1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" gracePeriod=30 Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.973612 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 11:04:41 crc kubenswrapper[4852]: I0129 11:04:41.980963 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.003383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9zcr\" (UniqueName: \"kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.004257 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.003868 4852 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.004443 4852 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.004511 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts podName:4dab3827-3a1d-45f0-a021-d0ef66b37f0a nodeName:}" failed. No retries permitted until 2026-01-29 11:04:43.004483815 +0000 UTC m=+1380.221814999 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts") pod "root-account-create-update-cjmvw" (UID: "4dab3827-3a1d-45f0-a021-d0ef66b37f0a") : configmap "openstack-scripts" not found Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.005221 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts podName:5a59d8ce-ea12-4717-921f-4f7233a70488 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:43.004567967 +0000 UTC m=+1380.221899131 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts") pod "keystone-f67f-account-create-update-lmc6d" (UID: "5a59d8ce-ea12-4717-921f-4f7233a70488") : configmap "openstack-scripts" not found Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.010643 4852 projected.go:194] Error preparing data for projected volume kube-api-access-w9zcr for pod openstack/keystone-f67f-account-create-update-lmc6d: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.010735 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr podName:5a59d8ce-ea12-4717-921f-4f7233a70488 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:43.010700796 +0000 UTC m=+1380.228031930 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-w9zcr" (UniqueName: "kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr") pod "keystone-f67f-account-create-update-lmc6d" (UID: "5a59d8ce-ea12-4717-921f-4f7233a70488") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.023801 4852 scope.go:117] "RemoveContainer" containerID="1d33f432ea0c514311532d8ed2d373dc60579c4e1795dff17fb46ca3b939ee8c" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.023953 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.059018 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-jmf7z"] Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.064088 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-5040-account-create-update-jmf7z"] Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.106147 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-784tl\" (UniqueName: \"kubernetes.io/projected/f6b14960-4f7f-465e-8e53-96a14875878e-kube-api-access-784tl\") pod \"f6b14960-4f7f-465e-8e53-96a14875878e\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.106268 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-combined-ca-bundle\") pod \"f6b14960-4f7f-465e-8e53-96a14875878e\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.106368 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-config\") pod \"f6b14960-4f7f-465e-8e53-96a14875878e\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.106474 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs\") pod \"f6b14960-4f7f-465e-8e53-96a14875878e\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.112015 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6b14960-4f7f-465e-8e53-96a14875878e-kube-api-access-784tl" (OuterVolumeSpecName: "kube-api-access-784tl") pod "f6b14960-4f7f-465e-8e53-96a14875878e" (UID: "f6b14960-4f7f-465e-8e53-96a14875878e"). InnerVolumeSpecName "kube-api-access-784tl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.126796 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.138707 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.151374 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "f6b14960-4f7f-465e-8e53-96a14875878e" (UID: "f6b14960-4f7f-465e-8e53-96a14875878e"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.174149 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs podName:f6b14960-4f7f-465e-8e53-96a14875878e nodeName:}" failed. No retries permitted until 2026-01-29 11:04:42.674123546 +0000 UTC m=+1379.891454680 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "kube-state-metrics-tls-certs" (UniqueName: "kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs") pod "f6b14960-4f7f-465e-8e53-96a14875878e" (UID: "f6b14960-4f7f-465e-8e53-96a14875878e") : error deleting /var/lib/kubelet/pods/f6b14960-4f7f-465e-8e53-96a14875878e/volume-subpaths: remove /var/lib/kubelet/pods/f6b14960-4f7f-465e-8e53-96a14875878e/volume-subpaths: no such file or directory Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.176425 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6b14960-4f7f-465e-8e53-96a14875878e" (UID: "f6b14960-4f7f-465e-8e53-96a14875878e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.199029 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212032 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212278 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65f47530-2db1-46a2-84fa-dde28af57083-logs\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212356 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-public-tls-certs\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212448 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-internal-tls-certs\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212589 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-combined-ca-bundle\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212676 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-scripts\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212765 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e967d95c-8de4-4167-82ef-1b32f6026476-etc-machine-id\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212832 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-scripts\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212897 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65f47530-2db1-46a2-84fa-dde28af57083-logs" (OuterVolumeSpecName: "logs") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.212916 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data-custom\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213041 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-config-data\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213115 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-internal-tls-certs\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213213 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcq82\" (UniqueName: \"kubernetes.io/projected/65f47530-2db1-46a2-84fa-dde28af57083-kube-api-access-hcq82\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213296 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-combined-ca-bundle\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213399 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ln67j\" (UniqueName: \"kubernetes.io/projected/e967d95c-8de4-4167-82ef-1b32f6026476-kube-api-access-ln67j\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213470 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-public-tls-certs\") pod \"65f47530-2db1-46a2-84fa-dde28af57083\" (UID: \"65f47530-2db1-46a2-84fa-dde28af57083\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.213534 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e967d95c-8de4-4167-82ef-1b32f6026476-logs\") pod \"e967d95c-8de4-4167-82ef-1b32f6026476\" (UID: \"e967d95c-8de4-4167-82ef-1b32f6026476\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.214106 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65f47530-2db1-46a2-84fa-dde28af57083-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.214177 4852 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.214240 4852 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-784tl\" (UniqueName: \"kubernetes.io/projected/f6b14960-4f7f-465e-8e53-96a14875878e-kube-api-access-784tl\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.214312 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.214314 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e967d95c-8de4-4167-82ef-1b32f6026476-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.218851 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e967d95c-8de4-4167-82ef-1b32f6026476-logs" (OuterVolumeSpecName: "logs") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.221521 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.222738 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-scripts" (OuterVolumeSpecName: "scripts") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.230151 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e967d95c-8de4-4167-82ef-1b32f6026476-kube-api-access-ln67j" (OuterVolumeSpecName: "kube-api-access-ln67j") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "kube-api-access-ln67j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.242968 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-scripts" (OuterVolumeSpecName: "scripts") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.243798 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65f47530-2db1-46a2-84fa-dde28af57083-kube-api-access-hcq82" (OuterVolumeSpecName: "kube-api-access-hcq82") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "kube-api-access-hcq82". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.270409 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.315332 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-logs\") pod \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.315643 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-config-data\") pod \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.315692 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kh6r\" (UniqueName: \"kubernetes.io/projected/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-kube-api-access-9kh6r\") pod \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.315854 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-combined-ca-bundle\") pod \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.315903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-nova-metadata-tls-certs\") pod \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\" (UID: \"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316258 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e967d95c-8de4-4167-82ef-1b32f6026476-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316276 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316287 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316295 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e967d95c-8de4-4167-82ef-1b32f6026476-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316303 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-scripts\") on node \"crc\" 
DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316311 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316320 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcq82\" (UniqueName: \"kubernetes.io/projected/65f47530-2db1-46a2-84fa-dde28af57083-kube-api-access-hcq82\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316328 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ln67j\" (UniqueName: \"kubernetes.io/projected/e967d95c-8de4-4167-82ef-1b32f6026476-kube-api-access-ln67j\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.316614 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-logs" (OuterVolumeSpecName: "logs") pod "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" (UID: "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.320494 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.324339 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-config-data" (OuterVolumeSpecName: "config-data") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.324497 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-kube-api-access-9kh6r" (OuterVolumeSpecName: "kube-api-access-9kh6r") pod "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" (UID: "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2"). InnerVolumeSpecName "kube-api-access-9kh6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.327727 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-588c766876-422z6" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.167:9311/healthcheck\": read tcp 10.217.0.2:38928->10.217.0.167:9311: read: connection reset by peer" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.328193 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-588c766876-422z6" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.167:9311/healthcheck\": read tcp 10.217.0.2:38942->10.217.0.167:9311: read: connection reset by peer" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.341910 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.355047 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-config-data" (OuterVolumeSpecName: "config-data") pod "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" (UID: "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.378458 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" (UID: "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.382335 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.383106 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data" (OuterVolumeSpecName: "config-data") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.383752 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" (UID: "99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.388383 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.392296 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e967d95c-8de4-4167-82ef-1b32f6026476" (UID: "e967d95c-8de4-4167-82ef-1b32f6026476"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.399247 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417189 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-config-data\") pod \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417230 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kolla-config\") pod \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417259 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-combined-ca-bundle\") pod \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417341 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srnth\" (UniqueName: \"kubernetes.io/projected/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kube-api-access-srnth\") pod \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417375 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-memcached-tls-certs\") pod \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\" (UID: \"ce70759d-e206-41b9-b8d2-52a8ca74f67c\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417976 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.417995 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418008 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418018 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418030 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418041 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kh6r\" (UniqueName: \"kubernetes.io/projected/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-kube-api-access-9kh6r\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418052 4852 reconciler_common.go:293] "Volume detached for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418063 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418073 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e967d95c-8de4-4167-82ef-1b32f6026476-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418084 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.418095 4852 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.419121 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "ce70759d-e206-41b9-b8d2-52a8ca74f67c" (UID: "ce70759d-e206-41b9-b8d2-52a8ca74f67c"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.419928 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-config-data" (OuterVolumeSpecName: "config-data") pod "ce70759d-e206-41b9-b8d2-52a8ca74f67c" (UID: "ce70759d-e206-41b9-b8d2-52a8ca74f67c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.436264 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kube-api-access-srnth" (OuterVolumeSpecName: "kube-api-access-srnth") pod "ce70759d-e206-41b9-b8d2-52a8ca74f67c" (UID: "ce70759d-e206-41b9-b8d2-52a8ca74f67c"). InnerVolumeSpecName "kube-api-access-srnth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.475340 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce70759d-e206-41b9-b8d2-52a8ca74f67c" (UID: "ce70759d-e206-41b9-b8d2-52a8ca74f67c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.475960 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "ce70759d-e206-41b9-b8d2-52a8ca74f67c" (UID: "ce70759d-e206-41b9-b8d2-52a8ca74f67c"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.486875 4852 generic.go:334] "Generic (PLEG): container finished" podID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerID="3fba4011047b82105d23692b2e3c2f2018cbd57dcbd733255f9715aaff21b5b9" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.487071 4852 generic.go:334] "Generic (PLEG): container finished" podID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerID="d816637dddc4a3042b5748abee104ea2ba5b25b7a148418ca6e26b6a15ced4dc" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.487177 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerDied","Data":"3fba4011047b82105d23692b2e3c2f2018cbd57dcbd733255f9715aaff21b5b9"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.487270 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerDied","Data":"d816637dddc4a3042b5748abee104ea2ba5b25b7a148418ca6e26b6a15ced4dc"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.498885 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "65f47530-2db1-46a2-84fa-dde28af57083" (UID: "65f47530-2db1-46a2-84fa-dde28af57083"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.506079 4852 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-cjmvw" secret="" err="secret \"galera-openstack-dockercfg-q8bpf\" not found" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.506361 4852 scope.go:117] "RemoveContainer" containerID="b6e25d81fe600efab5fce83e7dd9ac40fe5a0d9d6c2a7e7f3198f555b6740888" Jan 29 11:04:42 crc kubenswrapper[4852]: E0129 11:04:42.506776 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CrashLoopBackOff: \"back-off 10s restarting failed container=mariadb-account-create-update pod=root-account-create-update-cjmvw_openstack(4dab3827-3a1d-45f0-a021-d0ef66b37f0a)\"" pod="openstack/root-account-create-update-cjmvw" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.514073 4852 generic.go:334] "Generic (PLEG): container finished" podID="0bd5133c-258f-4725-9da4-17941a408af8" containerID="02a21826da03d176e3af7a859639e921f42ebe93b8e3d176115585c9b0fb9752" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.514660 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-588c766876-422z6" event={"ID":"0bd5133c-258f-4725-9da4-17941a408af8","Type":"ContainerDied","Data":"02a21826da03d176e3af7a859639e921f42ebe93b8e3d176115585c9b0fb9752"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.521207 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-logs\") pod \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.521371 4852 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-combined-ca-bundle\") pod \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.521515 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-internal-tls-certs\") pod \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.521687 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2px9m\" (UniqueName: \"kubernetes.io/projected/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-kube-api-access-2px9m\") pod \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.521908 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-config-data\") pod \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.522157 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-public-tls-certs\") pod \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\" (UID: \"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e\") " Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.523210 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65f47530-2db1-46a2-84fa-dde28af57083-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.523323 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.523485 4852 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.523566 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.523672 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srnth\" (UniqueName: \"kubernetes.io/projected/ce70759d-e206-41b9-b8d2-52a8ca74f67c-kube-api-access-srnth\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.523763 4852 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce70759d-e206-41b9-b8d2-52a8ca74f67c-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.524459 4852 generic.go:334] "Generic (PLEG): container finished" podID="232868d3-4c67-4820-b75c-e90009acf440" 
containerID="496427d75669b53712641a841d59135547182e1cbb1fb27f1360eb43642abdda" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.524559 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"232868d3-4c67-4820-b75c-e90009acf440","Type":"ContainerDied","Data":"496427d75669b53712641a841d59135547182e1cbb1fb27f1360eb43642abdda"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.525341 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-logs" (OuterVolumeSpecName: "logs") pod "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" (UID: "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.527863 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-kube-api-access-2px9m" (OuterVolumeSpecName: "kube-api-access-2px9m") pod "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" (UID: "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e"). InnerVolumeSpecName "kube-api-access-2px9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.548256 4852 generic.go:334] "Generic (PLEG): container finished" podID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerID="540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.548365 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2","Type":"ContainerDied","Data":"540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.548396 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2","Type":"ContainerDied","Data":"ef0d72dbc47522942e34315d745935b38756123b1c5354e77002335cb554c5f8"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.548415 4852 scope.go:117] "RemoveContainer" containerID="540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.548649 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.556416 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f6b14960-4f7f-465e-8e53-96a14875878e","Type":"ContainerDied","Data":"539f66142b2fd4c380d37975aff9441915243d44b03a29c02ff72640cbfd93f9"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.556772 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-config-data" (OuterVolumeSpecName: "config-data") pod "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" (UID: "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.556985 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.581647 4852 generic.go:334] "Generic (PLEG): container finished" podID="65f47530-2db1-46a2-84fa-dde28af57083" containerID="c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.581745 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5c4b86c744-rrhm8" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.582003 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c4b86c744-rrhm8" event={"ID":"65f47530-2db1-46a2-84fa-dde28af57083","Type":"ContainerDied","Data":"c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.587685 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5c4b86c744-rrhm8" event={"ID":"65f47530-2db1-46a2-84fa-dde28af57083","Type":"ContainerDied","Data":"8f16b9012519b79efc513e1f18d3b767fcd05df55aa6042bb6f2264563e53734"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.593542 4852 generic.go:334] "Generic (PLEG): container finished" podID="ce70759d-e206-41b9-b8d2-52a8ca74f67c" containerID="444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530" exitCode=0 Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.593690 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.593917 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ce70759d-e206-41b9-b8d2-52a8ca74f67c","Type":"ContainerDied","Data":"444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530"} Jan 29 11:04:42 crc kubenswrapper[4852]: I0129 11:04:42.593939 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ce70759d-e206-41b9-b8d2-52a8ca74f67c","Type":"ContainerDied","Data":"9232d7f44a8ae6ca9010010b4039552a2e20aaa160b8e92f4137b27b8c291272"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.604338 4852 generic.go:334] "Generic (PLEG): container finished" podID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerID="d56126df6c76ee4d2b57d1a9bfa70a3c707884469080624229983943f11c8570" exitCode=0 Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.604445 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e","Type":"ContainerDied","Data":"d56126df6c76ee4d2b57d1a9bfa70a3c707884469080624229983943f11c8570"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.629331 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.629351 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.629360 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2px9m\" (UniqueName: \"kubernetes.io/projected/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-kube-api-access-2px9m\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 
11:04:42.631434 4852 generic.go:334] "Generic (PLEG): container finished" podID="e967d95c-8de4-4167-82ef-1b32f6026476" containerID="f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983" exitCode=0 Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.631484 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e967d95c-8de4-4167-82ef-1b32f6026476","Type":"ContainerDied","Data":"f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.631506 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e967d95c-8de4-4167-82ef-1b32f6026476","Type":"ContainerDied","Data":"e8afd6b1f7b101e5b00004ac0fadc68cd2f4847b91fa7a3b8d797c3ad5375834"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.631568 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.633380 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" (UID: "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.639977 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" (UID: "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.640081 4852 generic.go:334] "Generic (PLEG): container finished" podID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerID="d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2" exitCode=0 Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.640190 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.640312 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.640410 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e","Type":"ContainerDied","Data":"d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.640481 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a31f5d33-4598-4ecb-9b2c-fc8271e8d29e","Type":"ContainerDied","Data":"732d58c88cef74e39f596b2616b324d23c644efa7ea0d2ed0f73157102ab85f6"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.678563 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" (UID: "a31f5d33-4598-4ecb-9b2c-fc8271e8d29e"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.730438 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs\") pod \"f6b14960-4f7f-465e-8e53-96a14875878e\" (UID: \"f6b14960-4f7f-465e-8e53-96a14875878e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.731772 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.731790 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.731824 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.733281 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "f6b14960-4f7f-465e-8e53-96a14875878e" (UID: "f6b14960-4f7f-465e-8e53-96a14875878e"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.748046 4852 scope.go:117] "RemoveContainer" containerID="d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.776563 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.785722 4852 scope.go:117] "RemoveContainer" containerID="540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.785996 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035\": container with ID starting with 540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035 not found: ID does not exist" containerID="540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.786031 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035"} err="failed to get container status \"540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035\": rpc error: code = NotFound desc = could not find container \"540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035\": container with ID starting with 540f8d948fc1f10ef374376c42cba6789d3c9d6e79cdcd84e5d3b0bdf8ec3035 not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.786066 4852 scope.go:117] "RemoveContainer" containerID="d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.786889 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb\": container with ID starting with d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb not found: ID does not exist" containerID="d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.786913 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb"} err="failed to get container status \"d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb\": rpc error: code = NotFound desc = could not find container \"d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb\": container with ID starting with d81b0e4d7f9e8b336e78ac7db88a8380b261e5807da06cd3447b8451c00962eb not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.786932 4852 scope.go:117] "RemoveContainer" containerID="40738ebf6fcb34e9c873aac76a52d0310a88a7a7608cb768d663221a2e552a28" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.787078 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.828035 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.837434 4852 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b14960-4f7f-465e-8e53-96a14875878e-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.840214 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.840694 4852 scope.go:117] "RemoveContainer" containerID="c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.864879 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.878145 4852 scope.go:117] "RemoveContainer" containerID="6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.879704 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.887137 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.898403 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5c4b86c744-rrhm8"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.907993 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5c4b86c744-rrhm8"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.915273 4852 scope.go:117] "RemoveContainer" containerID="c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.915471 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.916001 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc\": container with ID starting with c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc not found: ID does not exist" containerID="c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.916024 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc"} err="failed to get container status \"c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc\": rpc error: code = NotFound desc = could not find container \"c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc\": container with ID starting with c7b3ba5b21fc928734cedd6c004b14aea65d0f942dd5bb3e2aeb3cd95c6380bc not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.916043 4852 scope.go:117] "RemoveContainer" containerID="6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.916236 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9\": container with ID starting with 
6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9 not found: ID does not exist" containerID="6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.916252 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9"} err="failed to get container status \"6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9\": rpc error: code = NotFound desc = could not find container \"6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9\": container with ID starting with 6f6adac473e1d31be7e554fd06aa8aab81c9c4ec0cf72f85e9d43f477eeb65b9 not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.916264 4852 scope.go:117] "RemoveContainer" containerID="444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.927723 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.937810 4852 scope.go:117] "RemoveContainer" containerID="444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938202 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data-custom\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938293 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.938303 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530\": container with ID starting with 444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530 not found: ID does not exist" containerID="444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938332 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-scripts\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938346 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530"} err="failed to get container status \"444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530\": rpc error: code = NotFound desc = could not find container \"444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530\": container with ID starting with 444c823a02568c6f9e6e45f2526894573a67369c02352b12bab1e12647c83530 not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938378 4852 scope.go:117] "RemoveContainer" containerID="f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983" Jan 29 11:04:43 crc 
kubenswrapper[4852]: I0129 11:04:42.938380 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-httpd-run\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938413 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-public-tls-certs\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938440 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-config-data\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938467 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-combined-ca-bundle\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938500 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-combined-ca-bundle\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938527 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjwfs\" (UniqueName: \"kubernetes.io/projected/0bd5133c-258f-4725-9da4-17941a408af8-kube-api-access-zjwfs\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938548 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfx4v\" (UniqueName: \"kubernetes.io/projected/232868d3-4c67-4820-b75c-e90009acf440-kube-api-access-dfx4v\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938596 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd5133c-258f-4725-9da4-17941a408af8-logs\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938616 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-internal-tls-certs\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938653 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-public-tls-certs\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc 
kubenswrapper[4852]: I0129 11:04:42.938687 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-logs\") pod \"232868d3-4c67-4820-b75c-e90009acf440\" (UID: \"232868d3-4c67-4820-b75c-e90009acf440\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938708 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data\") pod \"0bd5133c-258f-4725-9da4-17941a408af8\" (UID: \"0bd5133c-258f-4725-9da4-17941a408af8\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.938796 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.939138 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.941658 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.942298 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bd5133c-258f-4725-9da4-17941a408af8-logs" (OuterVolumeSpecName: "logs") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.942384 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.943015 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-logs" (OuterVolumeSpecName: "logs") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.945755 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bd5133c-258f-4725-9da4-17941a408af8-kube-api-access-zjwfs" (OuterVolumeSpecName: "kube-api-access-zjwfs") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "kube-api-access-zjwfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.949317 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.950500 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/232868d3-4c67-4820-b75c-e90009acf440-kube-api-access-dfx4v" (OuterVolumeSpecName: "kube-api-access-dfx4v") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "kube-api-access-dfx4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.950559 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-scripts" (OuterVolumeSpecName: "scripts") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.967360 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.976250 4852 scope.go:117] "RemoveContainer" containerID="5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.976999 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.977289 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.978826 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.984016 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.994890 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.995536 4852 scope.go:117] "RemoveContainer" containerID="f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.995895 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983\": container with ID starting with f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983 not found: ID does not exist" containerID="f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.995932 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983"} err="failed to get container status \"f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983\": rpc error: code = NotFound desc = could not find container \"f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983\": container with ID starting with f27b40537ab2789f4269f9c062d5e8511671edd005fe549571be3d5434b99983 not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.995960 4852 scope.go:117] "RemoveContainer" containerID="5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:42.996184 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351\": container with ID starting with 5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351 not found: ID does not exist" containerID="5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.996201 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351"} err="failed to get container status \"5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351\": rpc error: code = NotFound desc = could not find container \"5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351\": container with ID starting with 5cd35bbd911398d64191a04944f154b8fe0f76fb0c31e60f7d2de9d2ad2c9351 not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:42.996215 4852 scope.go:117] "RemoveContainer" containerID="d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.001647 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.003844 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.022956 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data" (OuterVolumeSpecName: "config-data") pod "0bd5133c-258f-4725-9da4-17941a408af8" (UID: "0bd5133c-258f-4725-9da4-17941a408af8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.022998 4852 scope.go:117] "RemoveContainer" containerID="38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.023004 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-config-data" (OuterVolumeSpecName: "config-data") pod "232868d3-4c67-4820-b75c-e90009acf440" (UID: "232868d3-4c67-4820-b75c-e90009acf440"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.037340 4852 scope.go:117] "RemoveContainer" containerID="d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.037753 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2\": container with ID starting with d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2 not found: ID does not exist" containerID="d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.037782 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2"} err="failed to get container status \"d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2\": rpc error: code = NotFound desc = could not find container \"d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2\": container with ID starting with d914c07f139ace4acc1ed65b8ec61a0c7cb2ef9749c154e22e8545e10d934cf2 not found: ID does not exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.037802 4852 scope.go:117] "RemoveContainer" containerID="38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.038118 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5\": container with ID starting with 38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5 not found: ID does not exist" containerID="38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.038155 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5"} err="failed to get container status \"38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5\": rpc error: code = NotFound desc = could not find container \"38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5\": container with ID starting with 38010181520a2c80ed361cc7074c14baa63919ee275fa9584cbb2f1c2af11ab5 not found: ID does not 
exist" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.039981 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9zcr\" (UniqueName: \"kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040017 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts\") pod \"keystone-f67f-account-create-update-lmc6d\" (UID: \"5a59d8ce-ea12-4717-921f-4f7233a70488\") " pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.040061 4852 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.040124 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts podName:4dab3827-3a1d-45f0-a021-d0ef66b37f0a nodeName:}" failed. No retries permitted until 2026-01-29 11:04:45.040103795 +0000 UTC m=+1382.257434919 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts") pod "root-account-create-update-cjmvw" (UID: "4dab3827-3a1d-45f0-a021-d0ef66b37f0a") : configmap "openstack-scripts" not found Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.040208 4852 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040262 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040277 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040286 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.040313 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts podName:5a59d8ce-ea12-4717-921f-4f7233a70488 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:45.04028479 +0000 UTC m=+1382.257615934 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts") pod "keystone-f67f-account-create-update-lmc6d" (UID: "5a59d8ce-ea12-4717-921f-4f7233a70488") : configmap "openstack-scripts" not found Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040347 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040368 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjwfs\" (UniqueName: \"kubernetes.io/projected/0bd5133c-258f-4725-9da4-17941a408af8-kube-api-access-zjwfs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040384 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfx4v\" (UniqueName: \"kubernetes.io/projected/232868d3-4c67-4820-b75c-e90009acf440-kube-api-access-dfx4v\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040703 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd5133c-258f-4725-9da4-17941a408af8-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040719 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040732 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040744 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/232868d3-4c67-4820-b75c-e90009acf440-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040758 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040771 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd5133c-258f-4725-9da4-17941a408af8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040809 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.040825 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232868d3-4c67-4820-b75c-e90009acf440-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.042748 4852 projected.go:194] Error preparing data for projected volume kube-api-access-w9zcr for pod openstack/keystone-f67f-account-create-update-lmc6d: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.042805 4852 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr podName:5a59d8ce-ea12-4717-921f-4f7233a70488 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:45.042786341 +0000 UTC m=+1382.260117465 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-w9zcr" (UniqueName: "kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr") pod "keystone-f67f-account-create-update-lmc6d" (UID: "5a59d8ce-ea12-4717-921f-4f7233a70488") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.063248 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.142221 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.182898 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.184139 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.186604 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:43 crc kubenswrapper[4852]: E0129 11:04:43.186644 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="e113c351-f17d-477e-b671-0510cd03c0b0" containerName="nova-cell1-conductor-conductor" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.493620 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5185a393-4eff-496b-bd08-a8a91ada2a17" path="/var/lib/kubelet/pods/5185a393-4eff-496b-bd08-a8a91ada2a17/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.494760 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65f47530-2db1-46a2-84fa-dde28af57083" path="/var/lib/kubelet/pods/65f47530-2db1-46a2-84fa-dde28af57083/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.496349 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" path="/var/lib/kubelet/pods/99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.498626 4852 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" path="/var/lib/kubelet/pods/a31f5d33-4598-4ecb-9b2c-fc8271e8d29e/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.500279 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce70759d-e206-41b9-b8d2-52a8ca74f67c" path="/var/lib/kubelet/pods/ce70759d-e206-41b9-b8d2-52a8ca74f67c/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.502464 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5832629-fcd6-441c-a349-f771c099f7b4" path="/var/lib/kubelet/pods/e5832629-fcd6-441c-a349-f771c099f7b4/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.506490 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" path="/var/lib/kubelet/pods/e967d95c-8de4-4167-82ef-1b32f6026476/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.508195 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6b14960-4f7f-465e-8e53-96a14875878e" path="/var/lib/kubelet/pods/f6b14960-4f7f-465e-8e53-96a14875878e/volumes" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.657833 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"232868d3-4c67-4820-b75c-e90009acf440","Type":"ContainerDied","Data":"3e8cb7241d510c2d3750ad6e18056562f6adb3fec55eb5c38ef0e670cbcc97e0"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.657924 4852 scope.go:117] "RemoveContainer" containerID="496427d75669b53712641a841d59135547182e1cbb1fb27f1360eb43642abdda" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.658051 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.681119 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.683288 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-588c766876-422z6" event={"ID":"0bd5133c-258f-4725-9da4-17941a408af8","Type":"ContainerDied","Data":"b8a7ceeceb73045f64c69bea74d5dea7884e3a3e03aa3a441daa916c3097cd3d"} Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.683323 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-588c766876-422z6" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.701469 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.707422 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f67f-account-create-update-lmc6d" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.711139 4852 scope.go:117] "RemoveContainer" containerID="d75357c4635ac4b27b487dfd891bbe8e4a30c70a0f91635d4c51f8b3a4c92c2b" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.803234 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.810702 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmc6d"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.815451 4852 scope.go:117] "RemoveContainer" containerID="02a21826da03d176e3af7a859639e921f42ebe93b8e3d176115585c9b0fb9752" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.819891 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f67f-account-create-update-lmc6d"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.829022 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-588c766876-422z6"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.834908 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-588c766876-422z6"] Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.843086 4852 scope.go:117] "RemoveContainer" containerID="32ee64127b13aab5bf090c833967ae28b8675a965dc8eb24e8e5d01c8cd166f4" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.957713 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-combined-ca-bundle\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958065 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958115 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-httpd-run\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958221 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-config-data\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958241 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-logs\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958279 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6b574\" (UniqueName: \"kubernetes.io/projected/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-kube-api-access-6b574\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958347 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-internal-tls-certs\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 
crc kubenswrapper[4852]: I0129 11:04:43.958365 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-scripts\") pod \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\" (UID: \"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e\") " Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958712 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9zcr\" (UniqueName: \"kubernetes.io/projected/5a59d8ce-ea12-4717-921f-4f7233a70488-kube-api-access-w9zcr\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.958725 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a59d8ce-ea12-4717-921f-4f7233a70488-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.978745 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.981766 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-logs" (OuterVolumeSpecName: "logs") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.988543 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.988554 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-scripts" (OuterVolumeSpecName: "scripts") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:43 crc kubenswrapper[4852]: I0129 11:04:43.990220 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-kube-api-access-6b574" (OuterVolumeSpecName: "kube-api-access-6b574") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "kube-api-access-6b574". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.007367 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.014753 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-config-data" (OuterVolumeSpecName: "config-data") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061789 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061827 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061840 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6b574\" (UniqueName: \"kubernetes.io/projected/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-kube-api-access-6b574\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061852 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061865 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061886 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.061899 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.062107 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.062448 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data podName:f84fb26d-e835-4d75-95d5-695b6e033bb7 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:52.062422522 +0000 UTC m=+1389.279753666 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data") pod "rabbitmq-cell1-server-0" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7") : configmap "rabbitmq-cell1-config-data" not found Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.074678 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" (UID: "a29eddc2-c6bd-46c0-ba00-5a08b8b6793e"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.078688 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.097982 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.162506 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npdhd\" (UniqueName: \"kubernetes.io/projected/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-kube-api-access-npdhd\") pod \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.162618 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts\") pod \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\" (UID: \"4dab3827-3a1d-45f0-a021-d0ef66b37f0a\") " Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.163038 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.163051 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.163433 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4dab3827-3a1d-45f0-a021-d0ef66b37f0a" (UID: "4dab3827-3a1d-45f0-a021-d0ef66b37f0a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.173144 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-kube-api-access-npdhd" (OuterVolumeSpecName: "kube-api-access-npdhd") pod "4dab3827-3a1d-45f0-a021-d0ef66b37f0a" (UID: "4dab3827-3a1d-45f0-a021-d0ef66b37f0a"). InnerVolumeSpecName "kube-api-access-npdhd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.265691 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npdhd\" (UniqueName: \"kubernetes.io/projected/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-kube-api-access-npdhd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.265721 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4dab3827-3a1d-45f0-a021-d0ef66b37f0a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.367669 4852 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.367758 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data podName:1ab8189f-e95a-47b5-a130-5404901974e2 nodeName:}" failed. No retries permitted until 2026-01-29 11:04:52.367738346 +0000 UTC m=+1389.585069480 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data") pod "rabbitmq-server-0" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2") : configmap "rabbitmq-config-data" not found Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.525202 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/keystone-c4f8f88d-2whzw" podUID="5611a301-79d2-4082-beba-c95db2a2bcad" containerName="keystone-api" probeResult="failure" output="Get \"https://10.217.0.156:5000/v3\": read tcp 10.217.0.2:60222->10.217.0.156:5000: read: connection reset by peer" Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.536549 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.537928 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.539827 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.539886 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" containerName="nova-cell0-conductor-conductor" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.718885 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"a29eddc2-c6bd-46c0-ba00-5a08b8b6793e","Type":"ContainerDied","Data":"2021a6739ab6a8fc799c4e44a58a0f73eec73d8aa20969e0e2e36294cda2671e"} Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.718938 4852 scope.go:117] "RemoveContainer" containerID="d56126df6c76ee4d2b57d1a9bfa70a3c707884469080624229983943f11c8570" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.719058 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.731621 4852 generic.go:334] "Generic (PLEG): container finished" podID="5611a301-79d2-4082-beba-c95db2a2bcad" containerID="6309e1fbaf859c6c9e8f0f198002bde742b73cff0fec560c9a826574d4ae297a" exitCode=0 Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.731693 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c4f8f88d-2whzw" event={"ID":"5611a301-79d2-4082-beba-c95db2a2bcad","Type":"ContainerDied","Data":"6309e1fbaf859c6c9e8f0f198002bde742b73cff0fec560c9a826574d4ae297a"} Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.733936 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cjmvw" event={"ID":"4dab3827-3a1d-45f0-a021-d0ef66b37f0a","Type":"ContainerDied","Data":"8d79ffcbd42e91fe782c0afbe62ab3812addcd453e6264aad0f1a2ed4898b268"} Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.734005 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cjmvw" Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.760392 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.762214 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.763933 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 11:04:44 crc kubenswrapper[4852]: E0129 11:04:44.764003 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerName="nova-scheduler-scheduler" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.797643 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-cjmvw"] Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.807442 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-cjmvw"] Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.819068 4852 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.826135 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.831926 4852 scope.go:117] "RemoveContainer" containerID="ce3f6c95a078dc3bea52fa656715a9c015827b3d5a2ef4d87997ac240eeab0e7" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.857694 4852 scope.go:117] "RemoveContainer" containerID="b6e25d81fe600efab5fce83e7dd9ac40fe5a0d9d6c2a7e7f3198f555b6740888" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.877866 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.206:3000/\": dial tcp 10.217.0.206:3000: connect: connection refused" Jan 29 11:04:44 crc kubenswrapper[4852]: I0129 11:04:44.960445 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.075008 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-67sl6" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" probeResult="failure" output="" Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.081886 4852 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Jan 29 11:04:45 crc kubenswrapper[4852]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-29T11:04:38Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Jan 29 11:04:45 crc kubenswrapper[4852]: /etc/init.d/functions: line 589: 393 Alarm clock "$@" Jan 29 11:04:45 crc kubenswrapper[4852]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-67sl6" message=< Jan 29 11:04:45 crc kubenswrapper[4852]: Exiting ovn-controller (1) [FAILED] Jan 29 11:04:45 crc kubenswrapper[4852]: Killing ovn-controller (1) [ OK ] Jan 29 11:04:45 crc kubenswrapper[4852]: 2026-01-29T11:04:38Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Jan 29 11:04:45 crc kubenswrapper[4852]: /etc/init.d/functions: line 589: 393 Alarm clock "$@" Jan 29 11:04:45 crc kubenswrapper[4852]: > Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.081944 4852 kuberuntime_container.go:691] "PreStop hook failed" err=< Jan 29 11:04:45 crc kubenswrapper[4852]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-29T11:04:38Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Jan 29 11:04:45 crc kubenswrapper[4852]: /etc/init.d/functions: line 589: 393 Alarm clock "$@" Jan 29 11:04:45 crc kubenswrapper[4852]: > pod="openstack/ovn-controller-67sl6" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" containerID="cri-o://825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.082024 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-67sl6" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" containerID="cri-o://825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" gracePeriod=23 Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086567 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-combined-ca-bundle\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086646 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-credential-keys\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086733 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-config-data\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086767 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-internal-tls-certs\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086801 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-scripts\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086838 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-public-tls-certs\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086944 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njxns\" (UniqueName: \"kubernetes.io/projected/5611a301-79d2-4082-beba-c95db2a2bcad-kube-api-access-njxns\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.086991 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-fernet-keys\") pod \"5611a301-79d2-4082-beba-c95db2a2bcad\" (UID: \"5611a301-79d2-4082-beba-c95db2a2bcad\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.091858 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.098400 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-scripts" (OuterVolumeSpecName: "scripts") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.098648 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.098811 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5611a301-79d2-4082-beba-c95db2a2bcad-kube-api-access-njxns" (OuterVolumeSpecName: "kube-api-access-njxns") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "kube-api-access-njxns". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.122246 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.123399 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-config-data" (OuterVolumeSpecName: "config-data") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.137212 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.142726 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5611a301-79d2-4082-beba-c95db2a2bcad" (UID: "5611a301-79d2-4082-beba-c95db2a2bcad"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.164486 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189777 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189803 4852 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189813 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189821 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189829 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189839 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189848 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njxns\" (UniqueName: \"kubernetes.io/projected/5611a301-79d2-4082-beba-c95db2a2bcad-kube-api-access-njxns\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.189857 4852 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5611a301-79d2-4082-beba-c95db2a2bcad-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.276701 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8 is running failed: container process not found" containerID="825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.277056 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8 is running failed: container process not found" containerID="825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.277507 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8 is running failed: container process not found" 
containerID="825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.277545 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-67sl6" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291295 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291387 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-plugins-conf\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291434 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-server-conf\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291456 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291491 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4czr\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-kube-api-access-r4czr\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291552 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-confd\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291574 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-erlang-cookie\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291628 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-plugins\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291656 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-tls\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291734 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f84fb26d-e835-4d75-95d5-695b6e033bb7-pod-info\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.291758 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f84fb26d-e835-4d75-95d5-695b6e033bb7-erlang-cookie-secret\") pod \"f84fb26d-e835-4d75-95d5-695b6e033bb7\" (UID: \"f84fb26d-e835-4d75-95d5-695b6e033bb7\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.292089 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.292211 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.292351 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.292656 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.294623 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-kube-api-access-r4czr" (OuterVolumeSpecName: "kube-api-access-r4czr") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "kube-api-access-r4czr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.294653 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.295625 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f84fb26d-e835-4d75-95d5-695b6e033bb7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.296092 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f84fb26d-e835-4d75-95d5-695b6e033bb7-pod-info" (OuterVolumeSpecName: "pod-info") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.296621 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.301713 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.303113 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.303144 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.304960 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.304970 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" 
cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.305969 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.309926 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.310007 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.313383 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data" (OuterVolumeSpecName: "config-data") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.338093 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-server-conf" (OuterVolumeSpecName: "server-conf") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.376509 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f84fb26d-e835-4d75-95d5-695b6e033bb7" (UID: "f84fb26d-e835-4d75-95d5-695b6e033bb7"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.393967 4852 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f84fb26d-e835-4d75-95d5-695b6e033bb7-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394000 4852 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f84fb26d-e835-4d75-95d5-695b6e033bb7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394035 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394047 4852 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394060 4852 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-server-conf\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394071 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f84fb26d-e835-4d75-95d5-695b6e033bb7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394082 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4czr\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-kube-api-access-r4czr\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394092 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394103 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.394113 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f84fb26d-e835-4d75-95d5-695b6e033bb7-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.412098 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.473822 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bd5133c-258f-4725-9da4-17941a408af8" path="/var/lib/kubelet/pods/0bd5133c-258f-4725-9da4-17941a408af8/volumes" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.475041 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="232868d3-4c67-4820-b75c-e90009acf440" path="/var/lib/kubelet/pods/232868d3-4c67-4820-b75c-e90009acf440/volumes" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.476205 4852 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" path="/var/lib/kubelet/pods/4dab3827-3a1d-45f0-a021-d0ef66b37f0a/volumes" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.477513 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a59d8ce-ea12-4717-921f-4f7233a70488" path="/var/lib/kubelet/pods/5a59d8ce-ea12-4717-921f-4f7233a70488/volumes" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.478034 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" path="/var/lib/kubelet/pods/a29eddc2-c6bd-46c0-ba00-5a08b8b6793e/volumes" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.495566 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.668932 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.753666 4852 generic.go:334] "Generic (PLEG): container finished" podID="1ab8189f-e95a-47b5-a130-5404901974e2" containerID="f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6" exitCode=0 Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.753814 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1ab8189f-e95a-47b5-a130-5404901974e2","Type":"ContainerDied","Data":"f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6"} Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.753857 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.753880 4852 scope.go:117] "RemoveContainer" containerID="f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.753865 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1ab8189f-e95a-47b5-a130-5404901974e2","Type":"ContainerDied","Data":"ef9d240ed16cad0fc35c05ef8c5c8ae493cfaf3142fa7bc88122a228f78918b9"} Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.756837 4852 generic.go:334] "Generic (PLEG): container finished" podID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerID="c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77" exitCode=0 Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.756923 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.756934 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f84fb26d-e835-4d75-95d5-695b6e033bb7","Type":"ContainerDied","Data":"c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77"} Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.757735 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f84fb26d-e835-4d75-95d5-695b6e033bb7","Type":"ContainerDied","Data":"748b797a12353bbc561a885499225b3ebcdac7c649a8a1b5f8692ebf0da123f5"} Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.761203 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c4f8f88d-2whzw" event={"ID":"5611a301-79d2-4082-beba-c95db2a2bcad","Type":"ContainerDied","Data":"496da8b54a5a1c14319b94d4309a09b3a3574c099b65fb811e21ce9ebf17b180"} Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.761287 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c4f8f88d-2whzw" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.768026 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-67sl6_ceb5f935-ccb2-4449-964f-b48d616eefea/ovn-controller/0.log" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.768276 4852 generic.go:334] "Generic (PLEG): container finished" podID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerID="825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" exitCode=139 Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.768301 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6" event={"ID":"ceb5f935-ccb2-4449-964f-b48d616eefea","Type":"ContainerDied","Data":"825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8"} Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.789943 4852 scope.go:117] "RemoveContainer" containerID="c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.799913 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800073 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-erlang-cookie\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800133 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-plugins\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800170 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjnnr\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-kube-api-access-gjnnr\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: 
\"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800203 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-confd\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800241 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ab8189f-e95a-47b5-a130-5404901974e2-erlang-cookie-secret\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800511 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-plugins-conf\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800566 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ab8189f-e95a-47b5-a130-5404901974e2-pod-info\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800625 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-tls\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.800681 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-server-conf\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.801008 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"1ab8189f-e95a-47b5-a130-5404901974e2\" (UID: \"1ab8189f-e95a-47b5-a130-5404901974e2\") " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.809651 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.811203 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/1ab8189f-e95a-47b5-a130-5404901974e2-pod-info" (OuterVolumeSpecName: "pod-info") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.814258 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.815281 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.823081 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.825952 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.826679 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-kube-api-access-gjnnr" (OuterVolumeSpecName: "kube-api-access-gjnnr") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "kube-api-access-gjnnr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.828501 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ab8189f-e95a-47b5-a130-5404901974e2-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.834991 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.860948 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.873436 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c4f8f88d-2whzw"] Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.879013 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-c4f8f88d-2whzw"] Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.892010 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-server-conf" (OuterVolumeSpecName: "server-conf") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.893023 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data" (OuterVolumeSpecName: "config-data") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920714 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920771 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920783 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjnnr\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-kube-api-access-gjnnr\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920794 4852 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ab8189f-e95a-47b5-a130-5404901974e2-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920807 4852 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920820 4852 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ab8189f-e95a-47b5-a130-5404901974e2-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920861 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.920872 4852 reconciler_common.go:293] 
"Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-server-conf\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.936628 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.936676 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ab8189f-e95a-47b5-a130-5404901974e2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.939150 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.964944 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "1ab8189f-e95a-47b5-a130-5404901974e2" (UID: "1ab8189f-e95a-47b5-a130-5404901974e2"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.973722 4852 scope.go:117] "RemoveContainer" containerID="f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6" Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.974342 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6\": container with ID starting with f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6 not found: ID does not exist" containerID="f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.974385 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6"} err="failed to get container status \"f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6\": rpc error: code = NotFound desc = could not find container \"f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6\": container with ID starting with f679f1f0306837fa8231c3b167e76e28d0914fcb90ea6db68cbef1c476e79dd6 not found: ID does not exist" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.974411 4852 scope.go:117] "RemoveContainer" containerID="c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28" Jan 29 11:04:45 crc kubenswrapper[4852]: E0129 11:04:45.975073 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28\": container with ID starting with c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28 not found: ID does not exist" containerID="c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.975093 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28"} err="failed to get container status 
\"c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28\": rpc error: code = NotFound desc = could not find container \"c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28\": container with ID starting with c73d1cafaee947eabbf3ca8efc6e3a82b82619b662614a964ef2e201c4d0ab28 not found: ID does not exist" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.975107 4852 scope.go:117] "RemoveContainer" containerID="c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77" Jan 29 11:04:45 crc kubenswrapper[4852]: I0129 11:04:45.991278 4852 scope.go:117] "RemoveContainer" containerID="91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.006232 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-67sl6_ceb5f935-ccb2-4449-964f-b48d616eefea/ovn-controller/0.log" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.006301 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67sl6" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.020735 4852 scope.go:117] "RemoveContainer" containerID="c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77" Jan 29 11:04:46 crc kubenswrapper[4852]: E0129 11:04:46.023822 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77\": container with ID starting with c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77 not found: ID does not exist" containerID="c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.023914 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77"} err="failed to get container status \"c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77\": rpc error: code = NotFound desc = could not find container \"c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77\": container with ID starting with c8787c65b86c244d496cfdf1305ed28653780ba7dc395aed34160fe829e8ba77 not found: ID does not exist" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.023940 4852 scope.go:117] "RemoveContainer" containerID="91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a" Jan 29 11:04:46 crc kubenswrapper[4852]: E0129 11:04:46.025512 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a\": container with ID starting with 91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a not found: ID does not exist" containerID="91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.025597 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a"} err="failed to get container status \"91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a\": rpc error: code = NotFound desc = could not find container \"91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a\": container with ID starting with 91e883ca847ef913823e9bebe8cd1705e4e5c622a2d6fa0be1c262d0a7a9b99a not found: ID does not 
exist" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.025629 4852 scope.go:117] "RemoveContainer" containerID="6309e1fbaf859c6c9e8f0f198002bde742b73cff0fec560c9a826574d4ae297a" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.037711 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.037748 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ab8189f-e95a-47b5-a130-5404901974e2-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.083076 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.088480 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138628 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run-ovn\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138691 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-ovn-controller-tls-certs\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138734 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-log-ovn\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138786 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ceb5f935-ccb2-4449-964f-b48d616eefea-scripts\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138819 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-combined-ca-bundle\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138873 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.138952 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rpvj\" (UniqueName: \"kubernetes.io/projected/ceb5f935-ccb2-4449-964f-b48d616eefea-kube-api-access-5rpvj\") pod \"ceb5f935-ccb2-4449-964f-b48d616eefea\" (UID: \"ceb5f935-ccb2-4449-964f-b48d616eefea\") " Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.139309 4852 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.139367 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.139768 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run" (OuterVolumeSpecName: "var-run") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.140626 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ceb5f935-ccb2-4449-964f-b48d616eefea-scripts" (OuterVolumeSpecName: "scripts") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.143229 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceb5f935-ccb2-4449-964f-b48d616eefea-kube-api-access-5rpvj" (OuterVolumeSpecName: "kube-api-access-5rpvj") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "kube-api-access-5rpvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.158064 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.236496 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "ceb5f935-ccb2-4449-964f-b48d616eefea" (UID: "ceb5f935-ccb2-4449-964f-b48d616eefea"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240362 4852 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240401 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240411 4852 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240419 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ceb5f935-ccb2-4449-964f-b48d616eefea-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240427 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb5f935-ccb2-4449-964f-b48d616eefea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240436 4852 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ceb5f935-ccb2-4449-964f-b48d616eefea-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.240446 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rpvj\" (UniqueName: \"kubernetes.io/projected/ceb5f935-ccb2-4449-964f-b48d616eefea-kube-api-access-5rpvj\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.777954 4852 generic.go:334] "Generic (PLEG): container finished" podID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerID="8366b4f53588b202defa86ab08279ed8a31501c51a1b057517aab806551203c0" exitCode=0 Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.778056 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66d6b946b9-8qp8x" event={"ID":"2e7fd5a0-1c61-420f-8da8-fc192c66730b","Type":"ContainerDied","Data":"8366b4f53588b202defa86ab08279ed8a31501c51a1b057517aab806551203c0"} Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.782072 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerID="aa5685bc516d6db550494e4d88ddd8fe48813b31062f4470210e7804ed6c4c11" exitCode=0 Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.782135 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" event={"ID":"b8bf5d63-5181-4546-b5c8-94aaac228b1c","Type":"ContainerDied","Data":"aa5685bc516d6db550494e4d88ddd8fe48813b31062f4470210e7804ed6c4c11"} Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.814444 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-67sl6_ceb5f935-ccb2-4449-964f-b48d616eefea/ovn-controller/0.log" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.814536 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-67sl6" 
event={"ID":"ceb5f935-ccb2-4449-964f-b48d616eefea","Type":"ContainerDied","Data":"db1b6b8ae2034c02ad97d32e7e15ab1eff623ed779f456aecbb254e6a20c01e4"} Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.814593 4852 scope.go:117] "RemoveContainer" containerID="825a04efd263b838678eba08f4e02f1f1c52911ac0fa52727daaac1d8816d1b8" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.814698 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-67sl6" Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.846676 4852 generic.go:334] "Generic (PLEG): container finished" podID="e113c351-f17d-477e-b671-0510cd03c0b0" containerID="ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3" exitCode=0 Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.846753 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e113c351-f17d-477e-b671-0510cd03c0b0","Type":"ContainerDied","Data":"ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3"} Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.848973 4852 generic.go:334] "Generic (PLEG): container finished" podID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" exitCode=0 Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.849036 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8","Type":"ContainerDied","Data":"ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853"} Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.851524 4852 generic.go:334] "Generic (PLEG): container finished" podID="2d824719-4789-4d55-a1ec-2602e98d8b53" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" exitCode=0 Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.851641 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2d824719-4789-4d55-a1ec-2602e98d8b53","Type":"ContainerDied","Data":"709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e"} Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.860231 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-67sl6"] Jan 29 11:04:46 crc kubenswrapper[4852]: I0129 11:04:46.866357 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-67sl6"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.086562 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.133774 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.138468 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255262 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vxbj\" (UniqueName: \"kubernetes.io/projected/e113c351-f17d-477e-b671-0510cd03c0b0-kube-api-access-5vxbj\") pod \"e113c351-f17d-477e-b671-0510cd03c0b0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255339 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data\") pod \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255369 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v587\" (UniqueName: \"kubernetes.io/projected/b8bf5d63-5181-4546-b5c8-94aaac228b1c-kube-api-access-2v587\") pod \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255430 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data-custom\") pod \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255482 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8bf5d63-5181-4546-b5c8-94aaac228b1c-logs\") pod \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255531 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-config-data\") pod \"e113c351-f17d-477e-b671-0510cd03c0b0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255598 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data-custom\") pod \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255626 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data\") pod \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255653 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-combined-ca-bundle\") pod \"e113c351-f17d-477e-b671-0510cd03c0b0\" (UID: \"e113c351-f17d-477e-b671-0510cd03c0b0\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255689 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7fd5a0-1c61-420f-8da8-fc192c66730b-logs\") pod \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\" 
(UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255740 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrkcf\" (UniqueName: \"kubernetes.io/projected/2e7fd5a0-1c61-420f-8da8-fc192c66730b-kube-api-access-nrkcf\") pod \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255775 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-combined-ca-bundle\") pod \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\" (UID: \"2e7fd5a0-1c61-420f-8da8-fc192c66730b\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.255804 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-combined-ca-bundle\") pod \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\" (UID: \"b8bf5d63-5181-4546-b5c8-94aaac228b1c\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.260893 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e113c351-f17d-477e-b671-0510cd03c0b0-kube-api-access-5vxbj" (OuterVolumeSpecName: "kube-api-access-5vxbj") pod "e113c351-f17d-477e-b671-0510cd03c0b0" (UID: "e113c351-f17d-477e-b671-0510cd03c0b0"). InnerVolumeSpecName "kube-api-access-5vxbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.262954 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8bf5d63-5181-4546-b5c8-94aaac228b1c-logs" (OuterVolumeSpecName: "logs") pod "b8bf5d63-5181-4546-b5c8-94aaac228b1c" (UID: "b8bf5d63-5181-4546-b5c8-94aaac228b1c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.262969 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e7fd5a0-1c61-420f-8da8-fc192c66730b-logs" (OuterVolumeSpecName: "logs") pod "2e7fd5a0-1c61-420f-8da8-fc192c66730b" (UID: "2e7fd5a0-1c61-420f-8da8-fc192c66730b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.263301 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b8bf5d63-5181-4546-b5c8-94aaac228b1c" (UID: "b8bf5d63-5181-4546-b5c8-94aaac228b1c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.273887 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e7fd5a0-1c61-420f-8da8-fc192c66730b-kube-api-access-nrkcf" (OuterVolumeSpecName: "kube-api-access-nrkcf") pod "2e7fd5a0-1c61-420f-8da8-fc192c66730b" (UID: "2e7fd5a0-1c61-420f-8da8-fc192c66730b"). InnerVolumeSpecName "kube-api-access-nrkcf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.276055 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2e7fd5a0-1c61-420f-8da8-fc192c66730b" (UID: "2e7fd5a0-1c61-420f-8da8-fc192c66730b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.277744 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8bf5d63-5181-4546-b5c8-94aaac228b1c-kube-api-access-2v587" (OuterVolumeSpecName: "kube-api-access-2v587") pod "b8bf5d63-5181-4546-b5c8-94aaac228b1c" (UID: "b8bf5d63-5181-4546-b5c8-94aaac228b1c"). InnerVolumeSpecName "kube-api-access-2v587". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.293165 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e7fd5a0-1c61-420f-8da8-fc192c66730b" (UID: "2e7fd5a0-1c61-420f-8da8-fc192c66730b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.296037 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8bf5d63-5181-4546-b5c8-94aaac228b1c" (UID: "b8bf5d63-5181-4546-b5c8-94aaac228b1c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.302438 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-config-data" (OuterVolumeSpecName: "config-data") pod "e113c351-f17d-477e-b671-0510cd03c0b0" (UID: "e113c351-f17d-477e-b671-0510cd03c0b0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.303319 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e113c351-f17d-477e-b671-0510cd03c0b0" (UID: "e113c351-f17d-477e-b671-0510cd03c0b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.321715 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data" (OuterVolumeSpecName: "config-data") pod "b8bf5d63-5181-4546-b5c8-94aaac228b1c" (UID: "b8bf5d63-5181-4546-b5c8-94aaac228b1c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.329331 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data" (OuterVolumeSpecName: "config-data") pod "2e7fd5a0-1c61-420f-8da8-fc192c66730b" (UID: "2e7fd5a0-1c61-420f-8da8-fc192c66730b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.357983 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358018 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358030 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358042 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7fd5a0-1c61-420f-8da8-fc192c66730b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358053 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrkcf\" (UniqueName: \"kubernetes.io/projected/2e7fd5a0-1c61-420f-8da8-fc192c66730b-kube-api-access-nrkcf\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358063 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358073 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8bf5d63-5181-4546-b5c8-94aaac228b1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358083 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vxbj\" (UniqueName: \"kubernetes.io/projected/e113c351-f17d-477e-b671-0510cd03c0b0-kube-api-access-5vxbj\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358092 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358104 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v587\" (UniqueName: \"kubernetes.io/projected/b8bf5d63-5181-4546-b5c8-94aaac228b1c-kube-api-access-2v587\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358115 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e7fd5a0-1c61-420f-8da8-fc192c66730b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358128 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8bf5d63-5181-4546-b5c8-94aaac228b1c-logs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.358138 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e113c351-f17d-477e-b671-0510cd03c0b0-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 
crc kubenswrapper[4852]: I0129 11:04:47.473536 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" path="/var/lib/kubelet/pods/1ab8189f-e95a-47b5-a130-5404901974e2/volumes" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.474177 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5611a301-79d2-4082-beba-c95db2a2bcad" path="/var/lib/kubelet/pods/5611a301-79d2-4082-beba-c95db2a2bcad/volumes" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.474728 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" path="/var/lib/kubelet/pods/ceb5f935-ccb2-4449-964f-b48d616eefea/volumes" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.476406 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" path="/var/lib/kubelet/pods/f84fb26d-e835-4d75-95d5-695b6e033bb7/volumes" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.501376 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.662778 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-config-data\") pod \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.663126 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrwpr\" (UniqueName: \"kubernetes.io/projected/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-kube-api-access-qrwpr\") pod \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.663195 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-combined-ca-bundle\") pod \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\" (UID: \"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.667781 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-kube-api-access-qrwpr" (OuterVolumeSpecName: "kube-api-access-qrwpr") pod "786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" (UID: "786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8"). InnerVolumeSpecName "kube-api-access-qrwpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.688861 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-config-data" (OuterVolumeSpecName: "config-data") pod "786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" (UID: "786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.691667 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" (UID: "786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.752134 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.765325 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.766987 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrwpr\" (UniqueName: \"kubernetes.io/projected/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-kube-api-access-qrwpr\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.767003 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.867661 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-combined-ca-bundle\") pod \"2d824719-4789-4d55-a1ec-2602e98d8b53\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.867749 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-config-data\") pod \"2d824719-4789-4d55-a1ec-2602e98d8b53\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.867794 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4g8l\" (UniqueName: \"kubernetes.io/projected/2d824719-4789-4d55-a1ec-2602e98d8b53-kube-api-access-v4g8l\") pod \"2d824719-4789-4d55-a1ec-2602e98d8b53\" (UID: \"2d824719-4789-4d55-a1ec-2602e98d8b53\") " Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.868407 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-66d6b946b9-8qp8x" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.868392 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66d6b946b9-8qp8x" event={"ID":"2e7fd5a0-1c61-420f-8da8-fc192c66730b","Type":"ContainerDied","Data":"80a289af8fa836f56def5b749b72ca50195b93576e353b63c8abb46ac6157168"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.868454 4852 scope.go:117] "RemoveContainer" containerID="8366b4f53588b202defa86ab08279ed8a31501c51a1b057517aab806551203c0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.870498 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e113c351-f17d-477e-b671-0510cd03c0b0","Type":"ContainerDied","Data":"e295b6bce5ff0853f56668fbfbc3e69104a3ede022de473e48d93def166b5abb"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.870565 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.874045 4852 generic.go:334] "Generic (PLEG): container finished" podID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerID="b41dd88e530d0078eac9379893c00ca26e6907c39d4da306037e9f16ecf118b4" exitCode=0 Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.874106 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerDied","Data":"b41dd88e530d0078eac9379893c00ca26e6907c39d4da306037e9f16ecf118b4"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.874130 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09a768ce-3178-4886-b1fc-8f6aa136a04f","Type":"ContainerDied","Data":"ee38b63940b8c7e58191b4d6b17892bf42579cd381126b237554e32af0499e46"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.874143 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee38b63940b8c7e58191b4d6b17892bf42579cd381126b237554e32af0499e46" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.876004 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8","Type":"ContainerDied","Data":"cc67314fe2591c62afd8046c24d5db7daadbc58a89e4d100b6013ec098757fe8"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.876078 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.885318 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d824719-4789-4d55-a1ec-2602e98d8b53-kube-api-access-v4g8l" (OuterVolumeSpecName: "kube-api-access-v4g8l") pod "2d824719-4789-4d55-a1ec-2602e98d8b53" (UID: "2d824719-4789-4d55-a1ec-2602e98d8b53"). InnerVolumeSpecName "kube-api-access-v4g8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.886069 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.886069 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2d824719-4789-4d55-a1ec-2602e98d8b53","Type":"ContainerDied","Data":"413d1e7d316d1a4b8f32935fdbd849c634e863043fc47253597c844e049d9e91"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.889031 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" event={"ID":"b8bf5d63-5181-4546-b5c8-94aaac228b1c","Type":"ContainerDied","Data":"deb86643e87f681fea98cc6119ea17c4a82d37ebfee1f219c03c61b0f44de3fb"} Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.889097 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6cc75d7564-wfkl2" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.892712 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d824719-4789-4d55-a1ec-2602e98d8b53" (UID: "2d824719-4789-4d55-a1ec-2602e98d8b53"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.898664 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.908017 4852 scope.go:117] "RemoveContainer" containerID="a1df71499e1fd5786e8f8ce1f972f8c5cdb23d2e76c158581681aaf76b9972b9" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.914760 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-66d6b946b9-8qp8x"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.922041 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-66d6b946b9-8qp8x"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.927932 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-config-data" (OuterVolumeSpecName: "config-data") pod "2d824719-4789-4d55-a1ec-2602e98d8b53" (UID: "2d824719-4789-4d55-a1ec-2602e98d8b53"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.933809 4852 scope.go:117] "RemoveContainer" containerID="ab85bc6a1a6484cf466c4f1ad11cdb57ea66fb29aeecd1747d29796f8f8f3be3" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.949796 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-6cc75d7564-wfkl2"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.955451 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-6cc75d7564-wfkl2"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.965342 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.968982 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4g8l\" (UniqueName: \"kubernetes.io/projected/2d824719-4789-4d55-a1ec-2602e98d8b53-kube-api-access-v4g8l\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.969157 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.969211 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d824719-4789-4d55-a1ec-2602e98d8b53-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.975127 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.980034 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.980369 4852 scope.go:117] "RemoveContainer" containerID="ce66fd88aa84ec29cb886a1e09597e11a967efbf233ee16803af23fba7c2a853" Jan 29 11:04:47 crc kubenswrapper[4852]: I0129 11:04:47.985613 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.061566 4852 scope.go:117] "RemoveContainer" containerID="709acde72b03c061b22e1bd0c273d9fcfbc3dc2afa1fdb28723e0c141e0b101e" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 
11:04:48.070284 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-run-httpd\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070366 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-ceilometer-tls-certs\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070426 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-log-httpd\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070487 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-config-data\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070560 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-combined-ca-bundle\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070737 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-scripts\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070852 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltpts\" (UniqueName: \"kubernetes.io/projected/09a768ce-3178-4886-b1fc-8f6aa136a04f-kube-api-access-ltpts\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.070940 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-sg-core-conf-yaml\") pod \"09a768ce-3178-4886-b1fc-8f6aa136a04f\" (UID: \"09a768ce-3178-4886-b1fc-8f6aa136a04f\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.072121 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.072227 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.075757 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09a768ce-3178-4886-b1fc-8f6aa136a04f-kube-api-access-ltpts" (OuterVolumeSpecName: "kube-api-access-ltpts") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "kube-api-access-ltpts". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.085478 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-scripts" (OuterVolumeSpecName: "scripts") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.088814 4852 scope.go:117] "RemoveContainer" containerID="aa5685bc516d6db550494e4d88ddd8fe48813b31062f4470210e7804ed6c4c11" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.110375 4852 scope.go:117] "RemoveContainer" containerID="430bb8180e977735b3830ae9b80ddb4c04224564e891d55f48ab0ca2914dbb58" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.113575 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.116271 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.134390 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.161404 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-config-data" (OuterVolumeSpecName: "config-data") pod "09a768ce-3178-4886-b1fc-8f6aa136a04f" (UID: "09a768ce-3178-4886-b1fc-8f6aa136a04f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172385 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172428 4852 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172440 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09a768ce-3178-4886-b1fc-8f6aa136a04f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172449 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172457 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172465 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172474 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltpts\" (UniqueName: \"kubernetes.io/projected/09a768ce-3178-4886-b1fc-8f6aa136a04f-kube-api-access-ltpts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.172483 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09a768ce-3178-4886-b1fc-8f6aa136a04f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.224648 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.232304 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 11:04:48 crc kubenswrapper[4852]: E0129 11:04:48.267043 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2 is running failed: container process not found" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 11:04:48 crc kubenswrapper[4852]: E0129 11:04:48.267410 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2 is running failed: container process not found" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 11:04:48 crc kubenswrapper[4852]: E0129 11:04:48.267840 4852 log.go:32] "ExecSync cmd 
from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2 is running failed: container process not found" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 11:04:48 crc kubenswrapper[4852]: E0129 11:04:48.267901 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2 is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="galera" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.428091 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-7f4fbff985-ww2n4" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.166:9696/\": dial tcp 10.217.0.166:9696: connect: connection refused" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.844893 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.902787 4852 generic.go:334] "Generic (PLEG): container finished" podID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" exitCode=0 Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.902848 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.902870 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"41ebdede-a3fa-41b6-9c03-dffdba9a112b","Type":"ContainerDied","Data":"1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2"} Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.903493 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"41ebdede-a3fa-41b6-9c03-dffdba9a112b","Type":"ContainerDied","Data":"c8f2afa6863e6acca40aecd7202098e0c30981e2bce86b00a022ae6d2e88ad23"} Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.903518 4852 scope.go:117] "RemoveContainer" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.915398 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.931493 4852 scope.go:117] "RemoveContainer" containerID="86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.958698 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.961887 4852 scope.go:117] "RemoveContainer" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" Jan 29 11:04:48 crc kubenswrapper[4852]: E0129 11:04:48.962310 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2\": container with ID starting with 1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2 not found: ID does not exist" containerID="1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.962342 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2"} err="failed to get container status \"1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2\": rpc error: code = NotFound desc = could not find container \"1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2\": container with ID starting with 1b918ea6a9845bbc11267b096dae64cabc979f4841dc3b830b0538a99d4afaf2 not found: ID does not exist" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.962365 4852 scope.go:117] "RemoveContainer" containerID="86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572" Jan 29 11:04:48 crc kubenswrapper[4852]: E0129 11:04:48.962610 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572\": container with ID starting with 86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572 not found: ID does not exist" containerID="86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.962633 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572"} err="failed to get container status \"86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572\": rpc error: code = NotFound desc = could not find container \"86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572\": container with ID starting with 86a4727e32c901c5e6cb4285bb2288ef79529c930bb936ff0e10bc6b1c95f572 not found: ID does not exist" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.965229 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-generated\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983366 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-galera-tls-certs\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983403 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983434 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-combined-ca-bundle\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983459 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kolla-config\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983488 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scqnv\" (UniqueName: \"kubernetes.io/projected/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kube-api-access-scqnv\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983552 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-operator-scripts\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.983568 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-default\") pod \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\" (UID: \"41ebdede-a3fa-41b6-9c03-dffdba9a112b\") " Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.984715 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.985152 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.985919 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.986803 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.991360 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kube-api-access-scqnv" (OuterVolumeSpecName: "kube-api-access-scqnv") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "kube-api-access-scqnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:48 crc kubenswrapper[4852]: I0129 11:04:48.998087 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.019952 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.037466 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "41ebdede-a3fa-41b6-9c03-dffdba9a112b" (UID: "41ebdede-a3fa-41b6-9c03-dffdba9a112b"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085393 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085437 4852 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085447 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scqnv\" (UniqueName: \"kubernetes.io/projected/41ebdede-a3fa-41b6-9c03-dffdba9a112b-kube-api-access-scqnv\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085460 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085468 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41ebdede-a3fa-41b6-9c03-dffdba9a112b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085478 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/41ebdede-a3fa-41b6-9c03-dffdba9a112b-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085486 4852 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/41ebdede-a3fa-41b6-9c03-dffdba9a112b-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.085518 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.100509 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.187239 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.265955 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.275614 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.474144 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" path="/var/lib/kubelet/pods/09a768ce-3178-4886-b1fc-8f6aa136a04f/volumes" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.475306 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" path="/var/lib/kubelet/pods/2d824719-4789-4d55-a1ec-2602e98d8b53/volumes" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 
11:04:49.476125 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" path="/var/lib/kubelet/pods/2e7fd5a0-1c61-420f-8da8-fc192c66730b/volumes" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.477844 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" path="/var/lib/kubelet/pods/41ebdede-a3fa-41b6-9c03-dffdba9a112b/volumes" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.478598 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" path="/var/lib/kubelet/pods/786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8/volumes" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.479874 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" path="/var/lib/kubelet/pods/b8bf5d63-5181-4546-b5c8-94aaac228b1c/volumes" Jan 29 11:04:49 crc kubenswrapper[4852]: I0129 11:04:49.480691 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e113c351-f17d-477e-b671-0510cd03c0b0" path="/var/lib/kubelet/pods/e113c351-f17d-477e-b671-0510cd03c0b0/volumes" Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.300032 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.300651 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.301054 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.301092 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.301879 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.304015 4852 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.306817 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:50 crc kubenswrapper[4852]: E0129 11:04:50.306909 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:04:53 crc kubenswrapper[4852]: E0129 11:04:53.522658 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2683963a_32cd_488b_84f8_9222fc66a2b2.slice/crio-d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2683963a_32cd_488b_84f8_9222fc66a2b2.slice/crio-conmon-d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173.scope\": RecentStats: unable to find data in memory cache]" Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.898422 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963014 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-internal-tls-certs\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963066 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-combined-ca-bundle\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963088 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pln8b\" (UniqueName: \"kubernetes.io/projected/2683963a-32cd-488b-84f8-9222fc66a2b2-kube-api-access-pln8b\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963116 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-ovndb-tls-certs\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963169 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-config\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963196 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-public-tls-certs\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.963280 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-httpd-config\") pod \"2683963a-32cd-488b-84f8-9222fc66a2b2\" (UID: \"2683963a-32cd-488b-84f8-9222fc66a2b2\") " Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.971627 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2683963a-32cd-488b-84f8-9222fc66a2b2-kube-api-access-pln8b" (OuterVolumeSpecName: "kube-api-access-pln8b") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "kube-api-access-pln8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.981170 4852 generic.go:334] "Generic (PLEG): container finished" podID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerID="d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173" exitCode=0 Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.981401 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f4fbff985-ww2n4" Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.981447 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f4fbff985-ww2n4" event={"ID":"2683963a-32cd-488b-84f8-9222fc66a2b2","Type":"ContainerDied","Data":"d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173"} Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.981938 4852 scope.go:117] "RemoveContainer" containerID="82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6" Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.981772 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f4fbff985-ww2n4" event={"ID":"2683963a-32cd-488b-84f8-9222fc66a2b2","Type":"ContainerDied","Data":"3b524eed80f85c37c54ee5c4f63066909b50a5307e2c22131f8067b6b9e09b98"} Jan 29 11:04:53 crc kubenswrapper[4852]: I0129 11:04:53.987349 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.023061 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.023152 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.032475 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-config" (OuterVolumeSpecName: "config") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.032824 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.044020 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "2683963a-32cd-488b-84f8-9222fc66a2b2" (UID: "2683963a-32cd-488b-84f8-9222fc66a2b2"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.064864 4852 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.065108 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.065274 4852 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.065353 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.065528 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pln8b\" (UniqueName: \"kubernetes.io/projected/2683963a-32cd-488b-84f8-9222fc66a2b2-kube-api-access-pln8b\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.065683 4852 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.065811 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/2683963a-32cd-488b-84f8-9222fc66a2b2-config\") on node \"crc\" DevicePath \"\"" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.078805 4852 scope.go:117] "RemoveContainer" containerID="d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.099678 4852 scope.go:117] "RemoveContainer" containerID="82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6" Jan 29 11:04:54 crc kubenswrapper[4852]: E0129 11:04:54.100265 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6\": container with ID starting with 82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6 not found: ID does not exist" containerID="82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.100296 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6"} err="failed to get container status \"82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6\": rpc error: code = NotFound desc = could not find container \"82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6\": container with ID starting with 82798bbca6a73f6a56aabfbb522cc5b09081e7682e1b0c1ea52f6351a3c5fdf6 not found: ID does not exist" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.100319 4852 scope.go:117] "RemoveContainer" containerID="d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173" Jan 29 11:04:54 crc 
kubenswrapper[4852]: E0129 11:04:54.100860 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173\": container with ID starting with d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173 not found: ID does not exist" containerID="d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.100908 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173"} err="failed to get container status \"d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173\": rpc error: code = NotFound desc = could not find container \"d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173\": container with ID starting with d147ddaad4567a2e669ce9d463ba2ab4461aec9db521b43b30916975f642e173 not found: ID does not exist" Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.322885 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f4fbff985-ww2n4"] Jan 29 11:04:54 crc kubenswrapper[4852]: I0129 11:04:54.328890 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7f4fbff985-ww2n4"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.300253 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.301024 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.301451 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.301500 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.302117 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.304381 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.306079 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:04:55 crc kubenswrapper[4852]: E0129 11:04:55.306157 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:04:55 crc kubenswrapper[4852]: I0129 11:04:55.478908 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" path="/var/lib/kubelet/pods/2683963a-32cd-488b-84f8-9222fc66a2b2/volumes" Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.300405 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.302487 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.302556 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.303404 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.303622 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container 
is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.304976 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.306268 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:05:00 crc kubenswrapper[4852]: E0129 11:05:00.306326 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.299683 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.301490 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.301661 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.301761 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.301787 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.303161 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.304498 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 11:05:05 crc kubenswrapper[4852]: E0129 11:05:05.304569 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k8pcs" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.219682 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-k8pcs_c5bb0b97-62ba-4918-9cf8-b8659b028571/ovs-vswitchd/0.log" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.220878 4852 generic.go:334] "Generic (PLEG): container finished" podID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" exitCode=137 Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.220949 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerDied","Data":"856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1"} Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.231158 4852 generic.go:334] "Generic (PLEG): container finished" podID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerID="73859270b6703e319bb738155b60b5da8025987a2cbf2f4800261c79942db2e5" exitCode=137 Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.231215 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"73859270b6703e319bb738155b60b5da8025987a2cbf2f4800261c79942db2e5"} Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.569136 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-k8pcs_c5bb0b97-62ba-4918-9cf8-b8659b028571/ovs-vswitchd/0.log" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.569876 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.578934 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594464 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-run\") pod \"c5bb0b97-62ba-4918-9cf8-b8659b028571\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594505 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-log\") pod \"c5bb0b97-62ba-4918-9cf8-b8659b028571\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594619 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5bb0b97-62ba-4918-9cf8-b8659b028571-scripts\") pod \"c5bb0b97-62ba-4918-9cf8-b8659b028571\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594626 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-run" (OuterVolumeSpecName: "var-run") pod "c5bb0b97-62ba-4918-9cf8-b8659b028571" (UID: "c5bb0b97-62ba-4918-9cf8-b8659b028571"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594653 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-etc-ovs\") pod \"c5bb0b97-62ba-4918-9cf8-b8659b028571\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594694 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "c5bb0b97-62ba-4918-9cf8-b8659b028571" (UID: "c5bb0b97-62ba-4918-9cf8-b8659b028571"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594731 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-log" (OuterVolumeSpecName: "var-log") pod "c5bb0b97-62ba-4918-9cf8-b8659b028571" (UID: "c5bb0b97-62ba-4918-9cf8-b8659b028571"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594754 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66g9p\" (UniqueName: \"kubernetes.io/projected/c5bb0b97-62ba-4918-9cf8-b8659b028571-kube-api-access-66g9p\") pod \"c5bb0b97-62ba-4918-9cf8-b8659b028571\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.594780 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-lib\") pod \"c5bb0b97-62ba-4918-9cf8-b8659b028571\" (UID: \"c5bb0b97-62ba-4918-9cf8-b8659b028571\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.595437 4852 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.595459 4852 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.595470 4852 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.595496 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-lib" (OuterVolumeSpecName: "var-lib") pod "c5bb0b97-62ba-4918-9cf8-b8659b028571" (UID: "c5bb0b97-62ba-4918-9cf8-b8659b028571"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.596154 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5bb0b97-62ba-4918-9cf8-b8659b028571-scripts" (OuterVolumeSpecName: "scripts") pod "c5bb0b97-62ba-4918-9cf8-b8659b028571" (UID: "c5bb0b97-62ba-4918-9cf8-b8659b028571"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.603894 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5bb0b97-62ba-4918-9cf8-b8659b028571-kube-api-access-66g9p" (OuterVolumeSpecName: "kube-api-access-66g9p") pod "c5bb0b97-62ba-4918-9cf8-b8659b028571" (UID: "c5bb0b97-62ba-4918-9cf8-b8659b028571"). InnerVolumeSpecName "kube-api-access-66g9p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696525 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-lock\") pod \"8111fd43-32e9-4654-bf8e-444fbce4933a\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696561 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8111fd43-32e9-4654-bf8e-444fbce4933a-combined-ca-bundle\") pod \"8111fd43-32e9-4654-bf8e-444fbce4933a\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696608 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") pod \"8111fd43-32e9-4654-bf8e-444fbce4933a\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696628 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlh4f\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-kube-api-access-qlh4f\") pod \"8111fd43-32e9-4654-bf8e-444fbce4933a\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696676 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"8111fd43-32e9-4654-bf8e-444fbce4933a\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696706 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-cache\") pod \"8111fd43-32e9-4654-bf8e-444fbce4933a\" (UID: \"8111fd43-32e9-4654-bf8e-444fbce4933a\") " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696872 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5bb0b97-62ba-4918-9cf8-b8659b028571-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696907 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66g9p\" (UniqueName: \"kubernetes.io/projected/c5bb0b97-62ba-4918-9cf8-b8659b028571-kube-api-access-66g9p\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.696917 4852 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5bb0b97-62ba-4918-9cf8-b8659b028571-var-lib\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.698455 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-lock" (OuterVolumeSpecName: "lock") pod "8111fd43-32e9-4654-bf8e-444fbce4933a" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a"). InnerVolumeSpecName "lock". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.698530 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-cache" (OuterVolumeSpecName: "cache") pod "8111fd43-32e9-4654-bf8e-444fbce4933a" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.700743 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "swift") pod "8111fd43-32e9-4654-bf8e-444fbce4933a" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.701451 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8111fd43-32e9-4654-bf8e-444fbce4933a" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.702070 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-kube-api-access-qlh4f" (OuterVolumeSpecName: "kube-api-access-qlh4f") pod "8111fd43-32e9-4654-bf8e-444fbce4933a" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a"). InnerVolumeSpecName "kube-api-access-qlh4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.798468 4852 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-cache\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.798501 4852 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8111fd43-32e9-4654-bf8e-444fbce4933a-lock\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.798512 4852 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.798522 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlh4f\" (UniqueName: \"kubernetes.io/projected/8111fd43-32e9-4654-bf8e-444fbce4933a-kube-api-access-qlh4f\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.798549 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.812213 4852 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 29 11:05:08 crc kubenswrapper[4852]: I0129 11:05:08.899618 4852 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:08 
crc kubenswrapper[4852]: I0129 11:05:08.956208 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8111fd43-32e9-4654-bf8e-444fbce4933a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8111fd43-32e9-4654-bf8e-444fbce4933a" (UID: "8111fd43-32e9-4654-bf8e-444fbce4933a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.001614 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8111fd43-32e9-4654-bf8e-444fbce4933a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.240479 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-k8pcs_c5bb0b97-62ba-4918-9cf8-b8659b028571/ovs-vswitchd/0.log" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.241312 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k8pcs" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.241311 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k8pcs" event={"ID":"c5bb0b97-62ba-4918-9cf8-b8659b028571","Type":"ContainerDied","Data":"afd512161435d268479ef8106220a08c99eb5b9d26eff603c82cb8dac6665b1c"} Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.241361 4852 scope.go:117] "RemoveContainer" containerID="856d6788cfb2d0733cde1a11d5e510c85f67a413abfbb9feb6341c5997ed19d1" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.249372 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8111fd43-32e9-4654-bf8e-444fbce4933a","Type":"ContainerDied","Data":"cfd5b611022855b6aed693e3fd283814cd3a136d7984ad171086e9af275242c4"} Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.249473 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.285561 4852 scope.go:117] "RemoveContainer" containerID="7869f5333a5334edaea71fd6c9d93c03b2fe537c7a6a9570234c038603202b4f" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.289362 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-k8pcs"] Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.312632 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-k8pcs"] Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.318302 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.319659 4852 scope.go:117] "RemoveContainer" containerID="0bf6c5dd03fd6115ea46a72029c74c47503ea5da2617ca4acd6c358b19f7581e" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.327831 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.346774 4852 scope.go:117] "RemoveContainer" containerID="73859270b6703e319bb738155b60b5da8025987a2cbf2f4800261c79942db2e5" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.366351 4852 scope.go:117] "RemoveContainer" containerID="ce1f7c6d351cc99e3313d53f3d8f5133e907d6c87aab097b279a18222b571462" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.386185 4852 scope.go:117] "RemoveContainer" containerID="2fb8dded90eb3884703a4aa309816c0eff0cdf02427346e9d34e49253bf4d662" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.400734 4852 scope.go:117] "RemoveContainer" containerID="907bf322df4410dc6b7ff975343a603805ddab5bdea051b8b9a9717eb895ca80" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.420498 4852 scope.go:117] "RemoveContainer" containerID="45528cdeb850649069e891209d9ca38b5ce8b5d0110cd02108c6b5f6abe281fc" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.440924 4852 scope.go:117] "RemoveContainer" containerID="1e2b102b24f601e91c5e39ae16f39b46498693458c4fead72ba2b77aa8d49771" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.460818 4852 scope.go:117] "RemoveContainer" containerID="dde3e5fe58352e0cc69a3b45408e08dda15923a3ecec816e83910494c6735af4" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.473370 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" path="/var/lib/kubelet/pods/8111fd43-32e9-4654-bf8e-444fbce4933a/volumes" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.475415 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" path="/var/lib/kubelet/pods/c5bb0b97-62ba-4918-9cf8-b8659b028571/volumes" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.478283 4852 scope.go:117] "RemoveContainer" containerID="1da31bf3d6d70a3d5937cf86e2d07be3913f158a9179ef5de6c23c100b7e5517" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.497815 4852 scope.go:117] "RemoveContainer" containerID="91fe6b60918e96d60b8de169eade7e1727fc0ccc381141ea808831c289639ab8" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.516999 4852 scope.go:117] "RemoveContainer" containerID="7d276e426302c43e751c21a713a36f854cb3218920539f9fed9da38f45d520ce" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.534570 4852 scope.go:117] "RemoveContainer" containerID="118a6805e58988df9a38c6169f10a35d7f949be36a831d95306796e4b1348a45" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 
11:05:09.551457 4852 scope.go:117] "RemoveContainer" containerID="ac4d599adbc91bf89af64ad4f64d8683dca701a7383cd3db396529b7ca9ceeec" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.571930 4852 scope.go:117] "RemoveContainer" containerID="e7228e8da4a774db3b5290c30cdafed9405b8034f11e829f2c3ac803d946e4c3" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.596148 4852 scope.go:117] "RemoveContainer" containerID="7d958b8db8a90d2d6554c92f9750a01a932e5397d4686c338494a75d5e717c07" Jan 29 11:05:09 crc kubenswrapper[4852]: I0129 11:05:09.615007 4852 scope.go:117] "RemoveContainer" containerID="761316244eb0a26173100890d918cc7c1799abc67c0ec48e5c29ebc05dc7ed29" Jan 29 11:05:45 crc kubenswrapper[4852]: I0129 11:05:45.513795 4852 scope.go:117] "RemoveContainer" containerID="5a4569a0c66938bb15ae418dc4474095bccb853bb517e38eb2c06c8c521e60aa" Jan 29 11:05:45 crc kubenswrapper[4852]: I0129 11:05:45.548572 4852 scope.go:117] "RemoveContainer" containerID="71d22b58d4dc22fc8a888b0e9739ec5f63964d6176341a6a37e66f1cb7ee656c" Jan 29 11:06:30 crc kubenswrapper[4852]: I0129 11:06:30.017288 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:06:30 crc kubenswrapper[4852]: I0129 11:06:30.017981 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.798526 4852 scope.go:117] "RemoveContainer" containerID="3cb74d83906176b3368d7874d4dc52bac2342a925e464506e720f7daf2fd6b77" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.835340 4852 scope.go:117] "RemoveContainer" containerID="b4d4086f1f38fc735421b6e2ec75b96f1944a10c6bc1cbca217bbc72da3c061e" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.852039 4852 scope.go:117] "RemoveContainer" containerID="70a827e98d10c665afd0a9c14f88cac00a5f0d5fc4a7e56e136ce0c3ffd21760" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.878571 4852 scope.go:117] "RemoveContainer" containerID="a05938cf7aa43400ac64d077c4f27bd8aac2609da89a2cc7553fff6b92d2605d" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.905194 4852 scope.go:117] "RemoveContainer" containerID="f85d4051ca659acb4d0c6433794c8cd8c38b06955cbcf113f383525e8fba6fcf" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.939702 4852 scope.go:117] "RemoveContainer" containerID="cfc48cd686d7714ca17560f6be7658f618fc2eb927f3f91dac0d4e3c37a9cb42" Jan 29 11:06:45 crc kubenswrapper[4852]: I0129 11:06:45.985413 4852 scope.go:117] "RemoveContainer" containerID="65d76a68381aa7646e3657d86bd34071b3d95cc3ff4545da7a03719e60815981" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.010391 4852 scope.go:117] "RemoveContainer" containerID="f196aaf25763e1b86b3c1a188cfcfddffdbe783ec9c27d57363fa169a5176bff" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.027116 4852 scope.go:117] "RemoveContainer" containerID="028709c4503babddc3871b55ba930b2f3b728ca6b6eab06a923d621095f431b9" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.044261 4852 scope.go:117] "RemoveContainer" 
containerID="b1c723036342d80c512f029c8c3089fe19ed55b50afbd96280c5754ddc399172" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.059874 4852 scope.go:117] "RemoveContainer" containerID="618d49b42c8b4f757aea30017d84f7a653b8d140ff6d575dc8f81c48d2cdf5f7" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.078807 4852 scope.go:117] "RemoveContainer" containerID="655c5a497b7193f8e8b2150d018d78c5d4b8e8338aa5d573bffd5c89cd5db084" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.100121 4852 scope.go:117] "RemoveContainer" containerID="aaa67bc497ed14030c557d246990a4b25295868ce6b77d9d0fb9b2cd5f846d7e" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.119006 4852 scope.go:117] "RemoveContainer" containerID="f58ebcfb0f54e201d9318554d9cf3f7b0ed67554f4ba40579db34622110ae1b7" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.142298 4852 scope.go:117] "RemoveContainer" containerID="512a948f46c227dae55a3847babe03fbe428370e46c983712358f2759595f57f" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.161911 4852 scope.go:117] "RemoveContainer" containerID="5b404d0a771ad698caf04956b2e2d5bcaa400691da0f35ce44408eb4b17c9d32" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.196377 4852 scope.go:117] "RemoveContainer" containerID="72626e75fb57ea81ee5bbbd47796aad884f9aa87f9e40895da456464b3723a68" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.212880 4852 scope.go:117] "RemoveContainer" containerID="5d0052d8fbb2c502a21137e2acb2a4c06f83feef0268e7f3910bd71706399868" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.234752 4852 scope.go:117] "RemoveContainer" containerID="c38cfe0f0915cd81beb71287177a725e2ee7ef8ebc102e10838801dde8803cf8" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.263814 4852 scope.go:117] "RemoveContainer" containerID="7379f76291c94d8138e885354e745cb48b5bb3976bd4637906d7a83e5afd89e9" Jan 29 11:06:46 crc kubenswrapper[4852]: I0129 11:06:46.291498 4852 scope.go:117] "RemoveContainer" containerID="f82ea28adea60d3d3b85593db30daee168785cf0621ab4ad5ab37f1c6759fb5b" Jan 29 11:07:00 crc kubenswrapper[4852]: I0129 11:07:00.022066 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:07:00 crc kubenswrapper[4852]: I0129 11:07:00.022768 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.389329 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j78bj"] Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390398 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-updater" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390417 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-updater" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390442 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" 
containerName="glance-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390454 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390471 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5611a301-79d2-4082-beba-c95db2a2bcad" containerName="keystone-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390482 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="5611a301-79d2-4082-beba-c95db2a2bcad" containerName="keystone-api" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390494 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390503 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-server" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390522 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerName="setup-container" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390531 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerName="setup-container" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390544 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390555 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390577 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b14960-4f7f-465e-8e53-96a14875878e" containerName="kube-state-metrics" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390611 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b14960-4f7f-465e-8e53-96a14875878e" containerName="kube-state-metrics" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390631 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerName="nova-scheduler-scheduler" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390641 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerName="nova-scheduler-scheduler" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390654 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="sg-core" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390667 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="sg-core" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390680 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-notification-agent" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390690 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-notification-agent" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390706 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390717 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390731 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390742 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390764 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="swift-recon-cron" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390776 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="swift-recon-cron" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390794 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-updater" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390805 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-updater" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390816 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerName="mariadb-account-create-update" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390828 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerName="mariadb-account-create-update" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390845 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server-init" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390855 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server-init" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390873 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerName="rabbitmq" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390883 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerName="rabbitmq" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390899 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390909 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390928 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390938 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390954 4852 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-metadata" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390964 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-metadata" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.390977 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.390987 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391004 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-reaper" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391016 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-reaper" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391034 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="mysql-bootstrap" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391043 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="mysql-bootstrap" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391062 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce70759d-e206-41b9-b8d2-52a8ca74f67c" containerName="memcached" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391075 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce70759d-e206-41b9-b8d2-52a8ca74f67c" containerName="memcached" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391088 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391099 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391119 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" containerName="setup-container" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391130 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" containerName="setup-container" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391145 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391155 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-server" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391170 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391179 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-api" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391196 4852 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391206 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391220 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-expirer" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391244 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-expirer" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391258 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391268 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-server" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391286 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391297 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-api" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391313 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391322 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-api" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391342 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391353 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391368 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391377 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391389 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391400 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391414 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="galera" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391424 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="galera" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391439 4852 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391450 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391466 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="proxy-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391476 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="proxy-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391492 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391502 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391517 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391529 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391547 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391558 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391576 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391717 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391737 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391746 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391758 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e113c351-f17d-477e-b671-0510cd03c0b0" containerName="nova-cell1-conductor-conductor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391769 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e113c351-f17d-477e-b671-0510cd03c0b0" containerName="nova-cell1-conductor-conductor" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391781 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391791 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-replicator" Jan 29 11:07:22 crc 
kubenswrapper[4852]: E0129 11:07:22.391805 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391815 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391833 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" containerName="nova-cell0-conductor-conductor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391844 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" containerName="nova-cell0-conductor-conductor" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391855 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-central-agent" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391866 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-central-agent" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391882 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" containerName="rabbitmq" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391890 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" containerName="rabbitmq" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391904 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391915 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-log" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391928 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391938 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391958 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391968 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.391978 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="rsync" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.391988 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="rsync" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.392003 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392014 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" 
containerName="ovs-vswitchd" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.392033 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392042 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392292 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f84fb26d-e835-4d75-95d5-695b6e033bb7" containerName="rabbitmq" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392311 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerName="mariadb-account-create-update" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392331 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerName="mariadb-account-create-update" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392350 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392361 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392372 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392386 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392400 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" containerName="nova-api-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392412 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovs-vswitchd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392423 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e113c351-f17d-477e-b671-0510cd03c0b0" containerName="nova-cell1-conductor-conductor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392437 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="786f3a4d-fe5e-42a2-84f7-7e8b3bd038f8" containerName="nova-scheduler-scheduler" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392446 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-notification-agent" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392458 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392470 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392483 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d824719-4789-4d55-a1ec-2602e98d8b53" 
containerName="nova-cell0-conductor-conductor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392496 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392510 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392520 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="99bfcf5b-7ab7-4c29-93ed-e7661d26a7c2" containerName="nova-metadata-metadata" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392533 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392547 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392555 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-reaper" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392566 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392577 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="65f47530-2db1-46a2-84fa-dde28af57083" containerName="placement-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392618 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce70759d-e206-41b9-b8d2-52a8ca74f67c" containerName="memcached" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392635 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="swift-recon-cron" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392647 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392658 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392672 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="41ebdede-a3fa-41b6-9c03-dffdba9a112b" containerName="galera" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392680 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392691 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5bb0b97-62ba-4918-9cf8-b8659b028571" containerName="ovsdb-server" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392700 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a29eddc2-c6bd-46c0-ba00-5a08b8b6793e" containerName="glance-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392711 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e7fd5a0-1c61-420f-8da8-fc192c66730b" containerName="barbican-worker-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392725 4852 
memory_manager.go:354] "RemoveStaleState removing state" podUID="ceb5f935-ccb2-4449-964f-b48d616eefea" containerName="ovn-controller" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392734 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="232868d3-4c67-4820-b75c-e90009acf440" containerName="glance-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392746 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ab8189f-e95a-47b5-a130-5404901974e2" containerName="rabbitmq" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392758 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392769 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6b14960-4f7f-465e-8e53-96a14875878e" containerName="kube-state-metrics" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392783 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2683963a-32cd-488b-84f8-9222fc66a2b2" containerName="neutron-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392795 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd5133c-258f-4725-9da4-17941a408af8" containerName="barbican-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392827 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="ceilometer-central-agent" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392838 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e967d95c-8de4-4167-82ef-1b32f6026476" containerName="cinder-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392850 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="sg-core" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392863 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="5611a301-79d2-4082-beba-c95db2a2bcad" containerName="keystone-api" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392884 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-replicator" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392894 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-updater" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392903 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8bf5d63-5181-4546-b5c8-94aaac228b1c" containerName="barbican-keystone-listener-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392913 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="container-auditor" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392922 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="rsync" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392934 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="09a768ce-3178-4886-b1fc-8f6aa136a04f" containerName="proxy-httpd" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392946 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a31f5d33-4598-4ecb-9b2c-fc8271e8d29e" 
containerName="nova-api-log" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392959 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-updater" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392968 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="object-expirer" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.392976 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8111fd43-32e9-4654-bf8e-444fbce4933a" containerName="account-server" Jan 29 11:07:22 crc kubenswrapper[4852]: E0129 11:07:22.393157 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerName="mariadb-account-create-update" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.393167 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dab3827-3a1d-45f0-a021-d0ef66b37f0a" containerName="mariadb-account-create-update" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.394330 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.398027 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j78bj"] Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.509228 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcq58\" (UniqueName: \"kubernetes.io/projected/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-kube-api-access-rcq58\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.509680 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-catalog-content\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.509756 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-utilities\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.611356 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-utilities\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.611459 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcq58\" (UniqueName: \"kubernetes.io/projected/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-kube-api-access-rcq58\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.611568 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-catalog-content\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.612278 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-utilities\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.612311 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-catalog-content\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.634620 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcq58\" (UniqueName: \"kubernetes.io/projected/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-kube-api-access-rcq58\") pod \"certified-operators-j78bj\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:22 crc kubenswrapper[4852]: I0129 11:07:22.722978 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:23 crc kubenswrapper[4852]: I0129 11:07:23.171627 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j78bj"] Jan 29 11:07:23 crc kubenswrapper[4852]: I0129 11:07:23.706513 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerID="a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344" exitCode=0 Jan 29 11:07:23 crc kubenswrapper[4852]: I0129 11:07:23.706601 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j78bj" event={"ID":"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc","Type":"ContainerDied","Data":"a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344"} Jan 29 11:07:23 crc kubenswrapper[4852]: I0129 11:07:23.706950 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j78bj" event={"ID":"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc","Type":"ContainerStarted","Data":"e57da5b1b3fe42c64f7711e494a5debb708af715de84d5d7c660224d0cd12ff8"} Jan 29 11:07:25 crc kubenswrapper[4852]: I0129 11:07:25.765505 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerID="5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4" exitCode=0 Jan 29 11:07:25 crc kubenswrapper[4852]: I0129 11:07:25.765738 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j78bj" event={"ID":"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc","Type":"ContainerDied","Data":"5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4"} Jan 29 11:07:26 crc kubenswrapper[4852]: I0129 11:07:26.776385 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j78bj" 
event={"ID":"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc","Type":"ContainerStarted","Data":"6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43"} Jan 29 11:07:26 crc kubenswrapper[4852]: I0129 11:07:26.801061 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j78bj" podStartSLOduration=2.345046116 podStartE2EDuration="4.801037361s" podCreationTimestamp="2026-01-29 11:07:22 +0000 UTC" firstStartedPulling="2026-01-29 11:07:23.710039728 +0000 UTC m=+1540.927370902" lastFinishedPulling="2026-01-29 11:07:26.166031003 +0000 UTC m=+1543.383362147" observedRunningTime="2026-01-29 11:07:26.796026878 +0000 UTC m=+1544.013358032" watchObservedRunningTime="2026-01-29 11:07:26.801037361 +0000 UTC m=+1544.018368495" Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.016735 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.017233 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.017303 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.018264 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.018374 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" gracePeriod=600 Jan 29 11:07:30 crc kubenswrapper[4852]: E0129 11:07:30.644470 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.833005 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" exitCode=0 Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.833078 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f"} Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.833150 4852 scope.go:117] "RemoveContainer" containerID="d7c3f62d61ba87d851460d7d26afa6e186fa6847967e9524e9452f3e890a1087" Jan 29 11:07:30 crc kubenswrapper[4852]: I0129 11:07:30.833880 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:07:30 crc kubenswrapper[4852]: E0129 11:07:30.834352 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:07:32 crc kubenswrapper[4852]: I0129 11:07:32.724254 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:32 crc kubenswrapper[4852]: I0129 11:07:32.724320 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:32 crc kubenswrapper[4852]: I0129 11:07:32.772742 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:32 crc kubenswrapper[4852]: I0129 11:07:32.910715 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:33 crc kubenswrapper[4852]: I0129 11:07:33.011534 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j78bj"] Jan 29 11:07:34 crc kubenswrapper[4852]: I0129 11:07:34.866322 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j78bj" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="registry-server" containerID="cri-o://6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43" gracePeriod=2 Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.335287 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.410325 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-utilities\") pod \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.410860 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcq58\" (UniqueName: \"kubernetes.io/projected/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-kube-api-access-rcq58\") pod \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.410966 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-catalog-content\") pod \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\" (UID: \"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc\") " Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.411463 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-utilities" (OuterVolumeSpecName: "utilities") pod "1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" (UID: "1b2eaf72-d989-4c02-96ae-5da7c0eee4bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.418615 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-kube-api-access-rcq58" (OuterVolumeSpecName: "kube-api-access-rcq58") pod "1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" (UID: "1b2eaf72-d989-4c02-96ae-5da7c0eee4bc"). InnerVolumeSpecName "kube-api-access-rcq58". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.478643 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" (UID: "1b2eaf72-d989-4c02-96ae-5da7c0eee4bc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.512663 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.512704 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.512718 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcq58\" (UniqueName: \"kubernetes.io/projected/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc-kube-api-access-rcq58\") on node \"crc\" DevicePath \"\"" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.879202 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerID="6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43" exitCode=0 Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.879257 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j78bj" event={"ID":"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc","Type":"ContainerDied","Data":"6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43"} Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.879299 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j78bj" event={"ID":"1b2eaf72-d989-4c02-96ae-5da7c0eee4bc","Type":"ContainerDied","Data":"e57da5b1b3fe42c64f7711e494a5debb708af715de84d5d7c660224d0cd12ff8"} Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.879307 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j78bj" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.879319 4852 scope.go:117] "RemoveContainer" containerID="6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.903626 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j78bj"] Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.911282 4852 scope.go:117] "RemoveContainer" containerID="5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.913083 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j78bj"] Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.944455 4852 scope.go:117] "RemoveContainer" containerID="a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.965649 4852 scope.go:117] "RemoveContainer" containerID="6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43" Jan 29 11:07:35 crc kubenswrapper[4852]: E0129 11:07:35.966262 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43\": container with ID starting with 6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43 not found: ID does not exist" containerID="6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.966308 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43"} err="failed to get container status \"6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43\": rpc error: code = NotFound desc = could not find container \"6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43\": container with ID starting with 6473a0d62727418f2608f4a0a6350c748033bc4bff90534736369fd934460d43 not found: ID does not exist" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.966340 4852 scope.go:117] "RemoveContainer" containerID="5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4" Jan 29 11:07:35 crc kubenswrapper[4852]: E0129 11:07:35.966712 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4\": container with ID starting with 5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4 not found: ID does not exist" containerID="5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.966786 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4"} err="failed to get container status \"5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4\": rpc error: code = NotFound desc = could not find container \"5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4\": container with ID starting with 5d47564d789036b24a64a4a2059e737d3e1746f9dd836be36cf7c3a395f297c4 not found: ID does not exist" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.966831 4852 scope.go:117] "RemoveContainer" 
containerID="a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344" Jan 29 11:07:35 crc kubenswrapper[4852]: E0129 11:07:35.967243 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344\": container with ID starting with a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344 not found: ID does not exist" containerID="a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344" Jan 29 11:07:35 crc kubenswrapper[4852]: I0129 11:07:35.967272 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344"} err="failed to get container status \"a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344\": rpc error: code = NotFound desc = could not find container \"a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344\": container with ID starting with a2e61df1291d6f7e86f87f7e408483c1f2d46a6cd164f14bb644f14dbc53a344 not found: ID does not exist" Jan 29 11:07:37 crc kubenswrapper[4852]: I0129 11:07:37.475489 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" path="/var/lib/kubelet/pods/1b2eaf72-d989-4c02-96ae-5da7c0eee4bc/volumes" Jan 29 11:07:41 crc kubenswrapper[4852]: I0129 11:07:41.464101 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:07:41 crc kubenswrapper[4852]: E0129 11:07:41.464414 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:07:46 crc kubenswrapper[4852]: I0129 11:07:46.693720 4852 scope.go:117] "RemoveContainer" containerID="b8bd41006cef6fa1ebbdb27fa01c94f2f989cd072c9a79042467310c603afff2" Jan 29 11:07:46 crc kubenswrapper[4852]: I0129 11:07:46.742078 4852 scope.go:117] "RemoveContainer" containerID="1f82e67cdf0a37bc15bd82c2d397a0472b885d845d15fea79033f1d869c2c63a" Jan 29 11:07:46 crc kubenswrapper[4852]: I0129 11:07:46.781745 4852 scope.go:117] "RemoveContainer" containerID="1f00cf17358b6899eebd1eaba2bbf4f8b138d64c109c97c9a622904c07013291" Jan 29 11:07:46 crc kubenswrapper[4852]: I0129 11:07:46.813048 4852 scope.go:117] "RemoveContainer" containerID="d105533ecc823dceee85f7dd1ee79ff7664e044b9219ccc2db75fb06d4b37611" Jan 29 11:07:53 crc kubenswrapper[4852]: I0129 11:07:53.466832 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:07:53 crc kubenswrapper[4852]: E0129 11:07:53.467512 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:08:04 crc kubenswrapper[4852]: I0129 11:08:04.463592 4852 scope.go:117] "RemoveContainer" 
containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:08:04 crc kubenswrapper[4852]: E0129 11:08:04.464536 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:08:16 crc kubenswrapper[4852]: I0129 11:08:16.463996 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:08:16 crc kubenswrapper[4852]: E0129 11:08:16.465333 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:08:28 crc kubenswrapper[4852]: I0129 11:08:28.464613 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:08:28 crc kubenswrapper[4852]: E0129 11:08:28.465490 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:08:39 crc kubenswrapper[4852]: I0129 11:08:39.463807 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:08:39 crc kubenswrapper[4852]: E0129 11:08:39.464922 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:08:46 crc kubenswrapper[4852]: I0129 11:08:46.928493 4852 scope.go:117] "RemoveContainer" containerID="f13b266639c225546baf5b7b3099ea5770dc5dd5ff4c67f836f6b1c6c8367019" Jan 29 11:08:46 crc kubenswrapper[4852]: I0129 11:08:46.990125 4852 scope.go:117] "RemoveContainer" containerID="9e2fbd4b9daaa437f8f321cc9702964e5924353cd1b5d954ec556bcd5e7b8cfd" Jan 29 11:08:47 crc kubenswrapper[4852]: I0129 11:08:47.027372 4852 scope.go:117] "RemoveContainer" containerID="56adf8995d6d9f105e72928c811318e2027d2ada0d9e9d4dbc0b3cd7738924ab" Jan 29 11:08:47 crc kubenswrapper[4852]: I0129 11:08:47.058832 4852 scope.go:117] "RemoveContainer" containerID="08de13d6a892eddfd8ef7e195228d8f6b0c049491e81bb2ad93d8fa4eb38e23c" Jan 29 11:08:47 crc kubenswrapper[4852]: I0129 11:08:47.088045 4852 scope.go:117] "RemoveContainer" containerID="153dabdfcf20ad54b10959d9a6c480c6fa8781e5c6362e50144c73daff82f857" Jan 29 11:08:47 crc kubenswrapper[4852]: I0129 11:08:47.112578 4852 
scope.go:117] "RemoveContainer" containerID="1316e66541931581dfec528c6c6d4c09ab594c33c249d5a27bc94106e0e932b3" Jan 29 11:08:47 crc kubenswrapper[4852]: I0129 11:08:47.139001 4852 scope.go:117] "RemoveContainer" containerID="0c1cc19179c9804cc80fd8b48b39876977813981c876f20d3c7e8de9ed98b401" Jan 29 11:08:47 crc kubenswrapper[4852]: I0129 11:08:47.163640 4852 scope.go:117] "RemoveContainer" containerID="da9aa472fe83d92b938cf2bd579ba1249170e8f5d51d10d02034d707ea308d31" Jan 29 11:08:52 crc kubenswrapper[4852]: I0129 11:08:52.463809 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:08:52 crc kubenswrapper[4852]: E0129 11:08:52.464945 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.716714 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h8xw4"] Jan 29 11:08:59 crc kubenswrapper[4852]: E0129 11:08:59.720055 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="extract-utilities" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.720347 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="extract-utilities" Jan 29 11:08:59 crc kubenswrapper[4852]: E0129 11:08:59.721022 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="extract-content" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.721258 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="extract-content" Jan 29 11:08:59 crc kubenswrapper[4852]: E0129 11:08:59.721469 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="registry-server" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.721767 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="registry-server" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.722453 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b2eaf72-d989-4c02-96ae-5da7c0eee4bc" containerName="registry-server" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.726162 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.738718 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h8xw4"] Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.825820 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-utilities\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.825886 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgnh8\" (UniqueName: \"kubernetes.io/projected/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-kube-api-access-xgnh8\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.826064 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-catalog-content\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.927943 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-utilities\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.928020 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgnh8\" (UniqueName: \"kubernetes.io/projected/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-kube-api-access-xgnh8\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.928114 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-catalog-content\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.928605 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-utilities\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.928777 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-catalog-content\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:08:59 crc kubenswrapper[4852]: I0129 11:08:59.951525 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xgnh8\" (UniqueName: \"kubernetes.io/projected/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-kube-api-access-xgnh8\") pod \"community-operators-h8xw4\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:00 crc kubenswrapper[4852]: I0129 11:09:00.087125 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:00 crc kubenswrapper[4852]: I0129 11:09:00.607518 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h8xw4"] Jan 29 11:09:00 crc kubenswrapper[4852]: I0129 11:09:00.727575 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8xw4" event={"ID":"ea08b8ae-c3f7-4939-847d-4fd8526d93b6","Type":"ContainerStarted","Data":"c5a35eb1daebaf6bb2ce4f754584f03c61a24845a3cc183e582b3ae70958f291"} Jan 29 11:09:01 crc kubenswrapper[4852]: I0129 11:09:01.738064 4852 generic.go:334] "Generic (PLEG): container finished" podID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerID="5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec" exitCode=0 Jan 29 11:09:01 crc kubenswrapper[4852]: I0129 11:09:01.738119 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8xw4" event={"ID":"ea08b8ae-c3f7-4939-847d-4fd8526d93b6","Type":"ContainerDied","Data":"5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec"} Jan 29 11:09:01 crc kubenswrapper[4852]: I0129 11:09:01.740783 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:09:03 crc kubenswrapper[4852]: I0129 11:09:03.470877 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:09:03 crc kubenswrapper[4852]: E0129 11:09:03.471521 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:09:03 crc kubenswrapper[4852]: I0129 11:09:03.757832 4852 generic.go:334] "Generic (PLEG): container finished" podID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerID="710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec" exitCode=0 Jan 29 11:09:03 crc kubenswrapper[4852]: I0129 11:09:03.757889 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8xw4" event={"ID":"ea08b8ae-c3f7-4939-847d-4fd8526d93b6","Type":"ContainerDied","Data":"710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec"} Jan 29 11:09:04 crc kubenswrapper[4852]: I0129 11:09:04.776977 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8xw4" event={"ID":"ea08b8ae-c3f7-4939-847d-4fd8526d93b6","Type":"ContainerStarted","Data":"6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5"} Jan 29 11:09:04 crc kubenswrapper[4852]: I0129 11:09:04.801774 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h8xw4" podStartSLOduration=3.375256384 
podStartE2EDuration="5.801758889s" podCreationTimestamp="2026-01-29 11:08:59 +0000 UTC" firstStartedPulling="2026-01-29 11:09:01.740402981 +0000 UTC m=+1638.957734125" lastFinishedPulling="2026-01-29 11:09:04.166905466 +0000 UTC m=+1641.384236630" observedRunningTime="2026-01-29 11:09:04.797380062 +0000 UTC m=+1642.014711226" watchObservedRunningTime="2026-01-29 11:09:04.801758889 +0000 UTC m=+1642.019090023" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.333244 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9x8cq"] Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.335925 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.346396 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-utilities\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.346517 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-catalog-content\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.352405 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9x8cq"] Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.352723 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w29t\" (UniqueName: \"kubernetes.io/projected/10a66568-3085-4fef-833b-3f10dc16d35e-kube-api-access-9w29t\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.454379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-utilities\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.454451 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-catalog-content\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.454503 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w29t\" (UniqueName: \"kubernetes.io/projected/10a66568-3085-4fef-833b-3f10dc16d35e-kube-api-access-9w29t\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.455092 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-utilities\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.455369 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-catalog-content\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.478034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w29t\" (UniqueName: \"kubernetes.io/projected/10a66568-3085-4fef-833b-3f10dc16d35e-kube-api-access-9w29t\") pod \"redhat-marketplace-9x8cq\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.671919 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:07 crc kubenswrapper[4852]: I0129 11:09:07.926668 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9x8cq"] Jan 29 11:09:08 crc kubenswrapper[4852]: I0129 11:09:08.812771 4852 generic.go:334] "Generic (PLEG): container finished" podID="10a66568-3085-4fef-833b-3f10dc16d35e" containerID="77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e" exitCode=0 Jan 29 11:09:08 crc kubenswrapper[4852]: I0129 11:09:08.812826 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerDied","Data":"77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e"} Jan 29 11:09:08 crc kubenswrapper[4852]: I0129 11:09:08.812859 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerStarted","Data":"cad5c0d65bc8550bc413cff01768526cbb3a3b912ece34eb7a5f10e3aa194497"} Jan 29 11:09:09 crc kubenswrapper[4852]: I0129 11:09:09.834995 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerStarted","Data":"ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b"} Jan 29 11:09:10 crc kubenswrapper[4852]: I0129 11:09:10.087886 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:10 crc kubenswrapper[4852]: I0129 11:09:10.087992 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:10 crc kubenswrapper[4852]: I0129 11:09:10.152760 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:10 crc kubenswrapper[4852]: I0129 11:09:10.850746 4852 generic.go:334] "Generic (PLEG): container finished" podID="10a66568-3085-4fef-833b-3f10dc16d35e" containerID="ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b" exitCode=0 Jan 29 11:09:10 crc kubenswrapper[4852]: I0129 11:09:10.850812 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerDied","Data":"ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b"} Jan 29 11:09:10 crc kubenswrapper[4852]: I0129 11:09:10.924228 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:11 crc kubenswrapper[4852]: I0129 11:09:11.863167 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerStarted","Data":"53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe"} Jan 29 11:09:11 crc kubenswrapper[4852]: I0129 11:09:11.898866 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9x8cq" podStartSLOduration=2.429800422 podStartE2EDuration="4.898842185s" podCreationTimestamp="2026-01-29 11:09:07 +0000 UTC" firstStartedPulling="2026-01-29 11:09:08.816926115 +0000 UTC m=+1646.034257249" lastFinishedPulling="2026-01-29 11:09:11.285967858 +0000 UTC m=+1648.503299012" observedRunningTime="2026-01-29 11:09:11.892746746 +0000 UTC m=+1649.110077910" watchObservedRunningTime="2026-01-29 11:09:11.898842185 +0000 UTC m=+1649.116173339" Jan 29 11:09:12 crc kubenswrapper[4852]: I0129 11:09:12.490630 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h8xw4"] Jan 29 11:09:12 crc kubenswrapper[4852]: I0129 11:09:12.870982 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h8xw4" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="registry-server" containerID="cri-o://6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5" gracePeriod=2 Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.289312 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.357798 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-catalog-content\") pod \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.357954 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-utilities\") pod \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.358024 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgnh8\" (UniqueName: \"kubernetes.io/projected/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-kube-api-access-xgnh8\") pod \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\" (UID: \"ea08b8ae-c3f7-4939-847d-4fd8526d93b6\") " Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.358709 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-utilities" (OuterVolumeSpecName: "utilities") pod "ea08b8ae-c3f7-4939-847d-4fd8526d93b6" (UID: "ea08b8ae-c3f7-4939-847d-4fd8526d93b6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.363528 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-kube-api-access-xgnh8" (OuterVolumeSpecName: "kube-api-access-xgnh8") pod "ea08b8ae-c3f7-4939-847d-4fd8526d93b6" (UID: "ea08b8ae-c3f7-4939-847d-4fd8526d93b6"). InnerVolumeSpecName "kube-api-access-xgnh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.417842 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea08b8ae-c3f7-4939-847d-4fd8526d93b6" (UID: "ea08b8ae-c3f7-4939-847d-4fd8526d93b6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.460011 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.460237 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgnh8\" (UniqueName: \"kubernetes.io/projected/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-kube-api-access-xgnh8\") on node \"crc\" DevicePath \"\"" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.460417 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea08b8ae-c3f7-4939-847d-4fd8526d93b6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.884369 4852 generic.go:334] "Generic (PLEG): container finished" podID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerID="6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5" exitCode=0 Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.884416 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8xw4" event={"ID":"ea08b8ae-c3f7-4939-847d-4fd8526d93b6","Type":"ContainerDied","Data":"6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5"} Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.884733 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h8xw4" event={"ID":"ea08b8ae-c3f7-4939-847d-4fd8526d93b6","Type":"ContainerDied","Data":"c5a35eb1daebaf6bb2ce4f754584f03c61a24845a3cc183e582b3ae70958f291"} Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.884482 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h8xw4" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.884754 4852 scope.go:117] "RemoveContainer" containerID="6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.917068 4852 scope.go:117] "RemoveContainer" containerID="710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.920568 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h8xw4"] Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.926047 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h8xw4"] Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.943007 4852 scope.go:117] "RemoveContainer" containerID="5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.972980 4852 scope.go:117] "RemoveContainer" containerID="6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5" Jan 29 11:09:13 crc kubenswrapper[4852]: E0129 11:09:13.973405 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5\": container with ID starting with 6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5 not found: ID does not exist" containerID="6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.973440 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5"} err="failed to get container status \"6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5\": rpc error: code = NotFound desc = could not find container \"6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5\": container with ID starting with 6baaf5dd3d720b8d38cb041ac6ac3fafa39afafbf2d71a0b8abb2e905bb665a5 not found: ID does not exist" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.973467 4852 scope.go:117] "RemoveContainer" containerID="710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec" Jan 29 11:09:13 crc kubenswrapper[4852]: E0129 11:09:13.973936 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec\": container with ID starting with 710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec not found: ID does not exist" containerID="710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.973967 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec"} err="failed to get container status \"710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec\": rpc error: code = NotFound desc = could not find container \"710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec\": container with ID starting with 710cf5f9d2bfdf2a7b2537dd2630a1bb90e8e73505e6dea5c498a03c8d0578ec not found: ID does not exist" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.974007 4852 scope.go:117] "RemoveContainer" 
containerID="5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec" Jan 29 11:09:13 crc kubenswrapper[4852]: E0129 11:09:13.974531 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec\": container with ID starting with 5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec not found: ID does not exist" containerID="5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec" Jan 29 11:09:13 crc kubenswrapper[4852]: I0129 11:09:13.974633 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec"} err="failed to get container status \"5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec\": rpc error: code = NotFound desc = could not find container \"5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec\": container with ID starting with 5f998e9556383240b3659d00164f9eaa1d0bd9f52991bf21cfcfadf079b878ec not found: ID does not exist" Jan 29 11:09:15 crc kubenswrapper[4852]: I0129 11:09:15.480039 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" path="/var/lib/kubelet/pods/ea08b8ae-c3f7-4939-847d-4fd8526d93b6/volumes" Jan 29 11:09:17 crc kubenswrapper[4852]: I0129 11:09:17.463711 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:09:17 crc kubenswrapper[4852]: E0129 11:09:17.464522 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:09:17 crc kubenswrapper[4852]: I0129 11:09:17.672270 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:17 crc kubenswrapper[4852]: I0129 11:09:17.672345 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:17 crc kubenswrapper[4852]: I0129 11:09:17.747608 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:18 crc kubenswrapper[4852]: I0129 11:09:18.000470 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:18 crc kubenswrapper[4852]: I0129 11:09:18.065574 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9x8cq"] Jan 29 11:09:19 crc kubenswrapper[4852]: I0129 11:09:19.944317 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9x8cq" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="registry-server" containerID="cri-o://53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe" gracePeriod=2 Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.926619 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.960451 4852 generic.go:334] "Generic (PLEG): container finished" podID="10a66568-3085-4fef-833b-3f10dc16d35e" containerID="53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe" exitCode=0 Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.960511 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9x8cq" Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.960516 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerDied","Data":"53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe"} Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.960553 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9x8cq" event={"ID":"10a66568-3085-4fef-833b-3f10dc16d35e","Type":"ContainerDied","Data":"cad5c0d65bc8550bc413cff01768526cbb3a3b912ece34eb7a5f10e3aa194497"} Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.960574 4852 scope.go:117] "RemoveContainer" containerID="53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe" Jan 29 11:09:20 crc kubenswrapper[4852]: I0129 11:09:20.991501 4852 scope.go:117] "RemoveContainer" containerID="ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.014111 4852 scope.go:117] "RemoveContainer" containerID="77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.039812 4852 scope.go:117] "RemoveContainer" containerID="53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe" Jan 29 11:09:21 crc kubenswrapper[4852]: E0129 11:09:21.040156 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe\": container with ID starting with 53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe not found: ID does not exist" containerID="53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.040193 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe"} err="failed to get container status \"53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe\": rpc error: code = NotFound desc = could not find container \"53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe\": container with ID starting with 53dbd7581c726c8b35fb9524b7a1dc5cc01c14757f984914cfc41dc0301d09fe not found: ID does not exist" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.040216 4852 scope.go:117] "RemoveContainer" containerID="ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b" Jan 29 11:09:21 crc kubenswrapper[4852]: E0129 11:09:21.040551 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b\": container with ID starting with ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b not found: ID does not exist" 
containerID="ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.040576 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b"} err="failed to get container status \"ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b\": rpc error: code = NotFound desc = could not find container \"ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b\": container with ID starting with ac3d00536b1e1df063de8f9cb4b7026543db15dd86d150a4ddcc837329ccc28b not found: ID does not exist" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.040611 4852 scope.go:117] "RemoveContainer" containerID="77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e" Jan 29 11:09:21 crc kubenswrapper[4852]: E0129 11:09:21.041122 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e\": container with ID starting with 77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e not found: ID does not exist" containerID="77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.041149 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e"} err="failed to get container status \"77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e\": rpc error: code = NotFound desc = could not find container \"77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e\": container with ID starting with 77c66d8e5068a51c85ef5d4db855dce526fafa5242ce44d67ebd8682f08a2f8e not found: ID does not exist" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.111806 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-utilities\") pod \"10a66568-3085-4fef-833b-3f10dc16d35e\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.111896 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w29t\" (UniqueName: \"kubernetes.io/projected/10a66568-3085-4fef-833b-3f10dc16d35e-kube-api-access-9w29t\") pod \"10a66568-3085-4fef-833b-3f10dc16d35e\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.111987 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-catalog-content\") pod \"10a66568-3085-4fef-833b-3f10dc16d35e\" (UID: \"10a66568-3085-4fef-833b-3f10dc16d35e\") " Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.112817 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-utilities" (OuterVolumeSpecName: "utilities") pod "10a66568-3085-4fef-833b-3f10dc16d35e" (UID: "10a66568-3085-4fef-833b-3f10dc16d35e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.121126 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10a66568-3085-4fef-833b-3f10dc16d35e-kube-api-access-9w29t" (OuterVolumeSpecName: "kube-api-access-9w29t") pod "10a66568-3085-4fef-833b-3f10dc16d35e" (UID: "10a66568-3085-4fef-833b-3f10dc16d35e"). InnerVolumeSpecName "kube-api-access-9w29t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.157983 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10a66568-3085-4fef-833b-3f10dc16d35e" (UID: "10a66568-3085-4fef-833b-3f10dc16d35e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.213786 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.213839 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w29t\" (UniqueName: \"kubernetes.io/projected/10a66568-3085-4fef-833b-3f10dc16d35e-kube-api-access-9w29t\") on node \"crc\" DevicePath \"\"" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.213857 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10a66568-3085-4fef-833b-3f10dc16d35e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.322021 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9x8cq"] Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.331989 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9x8cq"] Jan 29 11:09:21 crc kubenswrapper[4852]: I0129 11:09:21.479535 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" path="/var/lib/kubelet/pods/10a66568-3085-4fef-833b-3f10dc16d35e/volumes" Jan 29 11:09:31 crc kubenswrapper[4852]: I0129 11:09:31.464066 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:09:31 crc kubenswrapper[4852]: E0129 11:09:31.465016 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:09:43 crc kubenswrapper[4852]: I0129 11:09:43.469377 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:09:43 crc kubenswrapper[4852]: E0129 11:09:43.470130 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.327093 4852 scope.go:117] "RemoveContainer" containerID="e238f6e4fb8be6b2a6e4c0cdf1fbdffd1010b03584ce000320137c375372d096" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.367560 4852 scope.go:117] "RemoveContainer" containerID="c745dfab9a29fbe2967f87a7ce5cec429071fe93027eb998fd737fe8ea427801" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.384028 4852 scope.go:117] "RemoveContainer" containerID="b5db0dc9d125bb6bd9a82455645892cb37b96d3694ff50709c1a00e7c92e58ab" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.418704 4852 scope.go:117] "RemoveContainer" containerID="312816bc4fa34a94ab28a45822b953085506a1c41a6822adcec69cb7b009660d" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.435177 4852 scope.go:117] "RemoveContainer" containerID="b41dd88e530d0078eac9379893c00ca26e6907c39d4da306037e9f16ecf118b4" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.451472 4852 scope.go:117] "RemoveContainer" containerID="907e93ef39a4a6500cbf037ce8f0712f50de707858f25b39621cba63db775ba4" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.466353 4852 scope.go:117] "RemoveContainer" containerID="997dcd23b056aaad9a977198f37d86b72e4af2e2a523b009c2a3efd396195c6c" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.508773 4852 scope.go:117] "RemoveContainer" containerID="a2ff805ec21cb9448761c97cfa8e29d4dcd8359611c43f72b2b39865d075b412" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.547751 4852 scope.go:117] "RemoveContainer" containerID="2f84f2c11a2ecec3e0c0fe0db54616addf33baede66b8b9e71ce016952a120c9" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.563234 4852 scope.go:117] "RemoveContainer" containerID="8113249d8097096be11542ee81dbefa8adcc9451437768487a64bc9c658b46a8" Jan 29 11:09:47 crc kubenswrapper[4852]: I0129 11:09:47.582859 4852 scope.go:117] "RemoveContainer" containerID="d816637dddc4a3042b5748abee104ea2ba5b25b7a148418ca6e26b6a15ced4dc" Jan 29 11:09:54 crc kubenswrapper[4852]: I0129 11:09:54.464109 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:09:54 crc kubenswrapper[4852]: E0129 11:09:54.464895 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:10:05 crc kubenswrapper[4852]: I0129 11:10:05.464927 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:10:05 crc kubenswrapper[4852]: E0129 11:10:05.465654 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:10:20 crc 
kubenswrapper[4852]: I0129 11:10:20.463505 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:10:20 crc kubenswrapper[4852]: E0129 11:10:20.464643 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:10:34 crc kubenswrapper[4852]: I0129 11:10:34.463672 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:10:34 crc kubenswrapper[4852]: E0129 11:10:34.464572 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:10:46 crc kubenswrapper[4852]: I0129 11:10:46.465093 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:10:46 crc kubenswrapper[4852]: E0129 11:10:46.466059 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:10:47 crc kubenswrapper[4852]: I0129 11:10:47.714758 4852 scope.go:117] "RemoveContainer" containerID="3fba4011047b82105d23692b2e3c2f2018cbd57dcbd733255f9715aaff21b5b9" Jan 29 11:10:47 crc kubenswrapper[4852]: I0129 11:10:47.748013 4852 scope.go:117] "RemoveContainer" containerID="fce074b5c8c23ae7e6f2429e922beae69e6f5e352d49342fb4993c72c8e442cd" Jan 29 11:10:59 crc kubenswrapper[4852]: I0129 11:10:59.463710 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:10:59 crc kubenswrapper[4852]: E0129 11:10:59.464550 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:11:13 crc kubenswrapper[4852]: I0129 11:11:13.473286 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:11:13 crc kubenswrapper[4852]: E0129 11:11:13.474061 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:11:24 crc kubenswrapper[4852]: I0129 11:11:24.462920 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:11:24 crc kubenswrapper[4852]: E0129 11:11:24.464695 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:11:37 crc kubenswrapper[4852]: I0129 11:11:37.463087 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:11:37 crc kubenswrapper[4852]: E0129 11:11:37.463925 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:11:48 crc kubenswrapper[4852]: I0129 11:11:48.464318 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:11:48 crc kubenswrapper[4852]: E0129 11:11:48.465529 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:12:03 crc kubenswrapper[4852]: I0129 11:12:03.470797 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:12:03 crc kubenswrapper[4852]: E0129 11:12:03.471896 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:12:17 crc kubenswrapper[4852]: I0129 11:12:17.469264 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:12:17 crc kubenswrapper[4852]: E0129 11:12:17.470223 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:12:29 crc kubenswrapper[4852]: I0129 11:12:29.463359 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:12:29 crc kubenswrapper[4852]: E0129 11:12:29.464509 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:12:40 crc kubenswrapper[4852]: I0129 11:12:40.464200 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:12:40 crc kubenswrapper[4852]: I0129 11:12:40.751019 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"ea92e7a4626eb3b5a4b59d32752972a17641b8f91711837a4e54ca0ac4e1a781"} Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.884261 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vk98q"] Jan 29 11:13:49 crc kubenswrapper[4852]: E0129 11:13:49.884997 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="registry-server" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885009 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="registry-server" Jan 29 11:13:49 crc kubenswrapper[4852]: E0129 11:13:49.885024 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="extract-utilities" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885030 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="extract-utilities" Jan 29 11:13:49 crc kubenswrapper[4852]: E0129 11:13:49.885040 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="registry-server" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885046 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="registry-server" Jan 29 11:13:49 crc kubenswrapper[4852]: E0129 11:13:49.885056 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="extract-content" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885061 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="extract-content" Jan 29 11:13:49 crc kubenswrapper[4852]: E0129 11:13:49.885074 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="extract-content" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885079 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="extract-content" Jan 29 11:13:49 crc kubenswrapper[4852]: E0129 11:13:49.885087 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="extract-utilities" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885093 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="extract-utilities" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885226 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea08b8ae-c3f7-4939-847d-4fd8526d93b6" containerName="registry-server" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.885236 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="10a66568-3085-4fef-833b-3f10dc16d35e" containerName="registry-server" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.886187 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.901878 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vk98q"] Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.973431 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6skv6\" (UniqueName: \"kubernetes.io/projected/ad7c2082-655b-432f-ab04-2dbd42de4fe5-kube-api-access-6skv6\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.973769 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-catalog-content\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:49 crc kubenswrapper[4852]: I0129 11:13:49.973908 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-utilities\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.075180 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6skv6\" (UniqueName: \"kubernetes.io/projected/ad7c2082-655b-432f-ab04-2dbd42de4fe5-kube-api-access-6skv6\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.075279 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-catalog-content\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.075308 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-utilities\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.075777 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-utilities\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.076080 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-catalog-content\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.094637 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6skv6\" (UniqueName: \"kubernetes.io/projected/ad7c2082-655b-432f-ab04-2dbd42de4fe5-kube-api-access-6skv6\") pod \"redhat-operators-vk98q\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.251314 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:13:50 crc kubenswrapper[4852]: I0129 11:13:50.727465 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vk98q"] Jan 29 11:13:51 crc kubenswrapper[4852]: I0129 11:13:51.316399 4852 generic.go:334] "Generic (PLEG): container finished" podID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerID="2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b" exitCode=0 Jan 29 11:13:51 crc kubenswrapper[4852]: I0129 11:13:51.316468 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerDied","Data":"2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b"} Jan 29 11:13:51 crc kubenswrapper[4852]: I0129 11:13:51.316712 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerStarted","Data":"699af238f2ee742de2e2544f06f958ea76dd1c8e6b732f03cf765a66dbd37494"} Jan 29 11:13:53 crc kubenswrapper[4852]: I0129 11:13:53.333420 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerStarted","Data":"6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe"} Jan 29 11:13:54 crc kubenswrapper[4852]: I0129 11:13:54.342677 4852 generic.go:334] "Generic (PLEG): container finished" podID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerID="6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe" exitCode=0 Jan 29 11:13:54 crc kubenswrapper[4852]: I0129 11:13:54.342748 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerDied","Data":"6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe"} Jan 29 11:13:56 crc kubenswrapper[4852]: I0129 11:13:56.359065 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerStarted","Data":"34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00"} Jan 29 11:13:56 crc 
kubenswrapper[4852]: I0129 11:13:56.376556 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vk98q" podStartSLOduration=3.050001574 podStartE2EDuration="7.376539068s" podCreationTimestamp="2026-01-29 11:13:49 +0000 UTC" firstStartedPulling="2026-01-29 11:13:51.318110609 +0000 UTC m=+1928.535441743" lastFinishedPulling="2026-01-29 11:13:55.644648103 +0000 UTC m=+1932.861979237" observedRunningTime="2026-01-29 11:13:56.372783866 +0000 UTC m=+1933.590115000" watchObservedRunningTime="2026-01-29 11:13:56.376539068 +0000 UTC m=+1933.593870202" Jan 29 11:14:00 crc kubenswrapper[4852]: I0129 11:14:00.252401 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:14:00 crc kubenswrapper[4852]: I0129 11:14:00.252739 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:14:01 crc kubenswrapper[4852]: I0129 11:14:01.297609 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vk98q" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="registry-server" probeResult="failure" output=< Jan 29 11:14:01 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 11:14:01 crc kubenswrapper[4852]: > Jan 29 11:14:10 crc kubenswrapper[4852]: I0129 11:14:10.331567 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:14:10 crc kubenswrapper[4852]: I0129 11:14:10.406151 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:14:10 crc kubenswrapper[4852]: I0129 11:14:10.576850 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vk98q"] Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.478101 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vk98q" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="registry-server" containerID="cri-o://34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00" gracePeriod=2 Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.905177 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.957294 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-catalog-content\") pod \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.957385 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-utilities\") pod \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.957542 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6skv6\" (UniqueName: \"kubernetes.io/projected/ad7c2082-655b-432f-ab04-2dbd42de4fe5-kube-api-access-6skv6\") pod \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\" (UID: \"ad7c2082-655b-432f-ab04-2dbd42de4fe5\") " Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.958313 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-utilities" (OuterVolumeSpecName: "utilities") pod "ad7c2082-655b-432f-ab04-2dbd42de4fe5" (UID: "ad7c2082-655b-432f-ab04-2dbd42de4fe5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:14:11 crc kubenswrapper[4852]: I0129 11:14:11.967858 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad7c2082-655b-432f-ab04-2dbd42de4fe5-kube-api-access-6skv6" (OuterVolumeSpecName: "kube-api-access-6skv6") pod "ad7c2082-655b-432f-ab04-2dbd42de4fe5" (UID: "ad7c2082-655b-432f-ab04-2dbd42de4fe5"). InnerVolumeSpecName "kube-api-access-6skv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.066458 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6skv6\" (UniqueName: \"kubernetes.io/projected/ad7c2082-655b-432f-ab04-2dbd42de4fe5-kube-api-access-6skv6\") on node \"crc\" DevicePath \"\"" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.066517 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.096544 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad7c2082-655b-432f-ab04-2dbd42de4fe5" (UID: "ad7c2082-655b-432f-ab04-2dbd42de4fe5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.168314 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad7c2082-655b-432f-ab04-2dbd42de4fe5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.491080 4852 generic.go:334] "Generic (PLEG): container finished" podID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerID="34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00" exitCode=0 Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.491126 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerDied","Data":"34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00"} Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.491155 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vk98q" event={"ID":"ad7c2082-655b-432f-ab04-2dbd42de4fe5","Type":"ContainerDied","Data":"699af238f2ee742de2e2544f06f958ea76dd1c8e6b732f03cf765a66dbd37494"} Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.491166 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vk98q" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.491174 4852 scope.go:117] "RemoveContainer" containerID="34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.520923 4852 scope.go:117] "RemoveContainer" containerID="6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.550413 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vk98q"] Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.555525 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vk98q"] Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.565298 4852 scope.go:117] "RemoveContainer" containerID="2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.595397 4852 scope.go:117] "RemoveContainer" containerID="34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00" Jan 29 11:14:12 crc kubenswrapper[4852]: E0129 11:14:12.595782 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00\": container with ID starting with 34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00 not found: ID does not exist" containerID="34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.595813 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00"} err="failed to get container status \"34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00\": rpc error: code = NotFound desc = could not find container \"34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00\": container with ID starting with 34cbce607e33ecbdb3bf2fb0dbba67f9539518a2303b83e8ec84946088e0ed00 not found: ID does not exist" Jan 29 11:14:12 crc 
kubenswrapper[4852]: I0129 11:14:12.595835 4852 scope.go:117] "RemoveContainer" containerID="6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe" Jan 29 11:14:12 crc kubenswrapper[4852]: E0129 11:14:12.596044 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe\": container with ID starting with 6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe not found: ID does not exist" containerID="6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.596072 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe"} err="failed to get container status \"6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe\": rpc error: code = NotFound desc = could not find container \"6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe\": container with ID starting with 6eb06719a9d50c32875eeeb4ac2ee7e63e6e59a8f9246f593975b31aac4fbbfe not found: ID does not exist" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.596089 4852 scope.go:117] "RemoveContainer" containerID="2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b" Jan 29 11:14:12 crc kubenswrapper[4852]: E0129 11:14:12.596295 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b\": container with ID starting with 2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b not found: ID does not exist" containerID="2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b" Jan 29 11:14:12 crc kubenswrapper[4852]: I0129 11:14:12.596322 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b"} err="failed to get container status \"2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b\": rpc error: code = NotFound desc = could not find container \"2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b\": container with ID starting with 2d3c799e075efc291a2ca846c2e878e65d430df481c186e196a5c1496a7a7f0b not found: ID does not exist" Jan 29 11:14:13 crc kubenswrapper[4852]: I0129 11:14:13.478068 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" path="/var/lib/kubelet/pods/ad7c2082-655b-432f-ab04-2dbd42de4fe5/volumes" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.017287 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.018816 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.149615 4852 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j"] Jan 29 11:15:00 crc kubenswrapper[4852]: E0129 11:15:00.150017 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="extract-utilities" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.150040 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="extract-utilities" Jan 29 11:15:00 crc kubenswrapper[4852]: E0129 11:15:00.150051 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="registry-server" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.150059 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="registry-server" Jan 29 11:15:00 crc kubenswrapper[4852]: E0129 11:15:00.150073 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="extract-content" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.150083 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="extract-content" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.150262 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad7c2082-655b-432f-ab04-2dbd42de4fe5" containerName="registry-server" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.150758 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.152787 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.153022 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.196063 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j"] Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.328175 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-secret-volume\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.328217 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-config-volume\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.328269 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb6t2\" (UniqueName: \"kubernetes.io/projected/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-kube-api-access-sb6t2\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.429410 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-secret-volume\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.429472 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-config-volume\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.429546 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb6t2\" (UniqueName: \"kubernetes.io/projected/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-kube-api-access-sb6t2\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.430696 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-config-volume\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.436457 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-secret-volume\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.450567 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb6t2\" (UniqueName: \"kubernetes.io/projected/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-kube-api-access-sb6t2\") pod \"collect-profiles-29494755-bc75j\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.475917 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:00 crc kubenswrapper[4852]: I0129 11:15:00.962653 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j"] Jan 29 11:15:01 crc kubenswrapper[4852]: I0129 11:15:01.922019 4852 generic.go:334] "Generic (PLEG): container finished" podID="c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" containerID="3a7a74f2590a42f7cf2410880be4d9d5ba7bfe0cfe107145c5aee0153ecf9f71" exitCode=0 Jan 29 11:15:01 crc kubenswrapper[4852]: I0129 11:15:01.922322 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" event={"ID":"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583","Type":"ContainerDied","Data":"3a7a74f2590a42f7cf2410880be4d9d5ba7bfe0cfe107145c5aee0153ecf9f71"} Jan 29 11:15:01 crc kubenswrapper[4852]: I0129 11:15:01.922358 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" event={"ID":"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583","Type":"ContainerStarted","Data":"d682db9c1b583790342cc3a30b75166668bd7a2f5b68c33a6ea5a0e253f1148d"} Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.220363 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.373493 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-secret-volume\") pod \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.373636 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-config-volume\") pod \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.373676 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6t2\" (UniqueName: \"kubernetes.io/projected/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-kube-api-access-sb6t2\") pod \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\" (UID: \"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583\") " Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.374794 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-config-volume" (OuterVolumeSpecName: "config-volume") pod "c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" (UID: "c2cb8a99-9341-4d10-95a4-ac5ad8d3f583"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.404718 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" (UID: "c2cb8a99-9341-4d10-95a4-ac5ad8d3f583"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.404993 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-kube-api-access-sb6t2" (OuterVolumeSpecName: "kube-api-access-sb6t2") pod "c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" (UID: "c2cb8a99-9341-4d10-95a4-ac5ad8d3f583"). InnerVolumeSpecName "kube-api-access-sb6t2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.475942 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.475984 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.475997 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6t2\" (UniqueName: \"kubernetes.io/projected/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583-kube-api-access-sb6t2\") on node \"crc\" DevicePath \"\"" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.937376 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" event={"ID":"c2cb8a99-9341-4d10-95a4-ac5ad8d3f583","Type":"ContainerDied","Data":"d682db9c1b583790342cc3a30b75166668bd7a2f5b68c33a6ea5a0e253f1148d"} Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.937679 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d682db9c1b583790342cc3a30b75166668bd7a2f5b68c33a6ea5a0e253f1148d" Jan 29 11:15:03 crc kubenswrapper[4852]: I0129 11:15:03.937479 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j" Jan 29 11:15:04 crc kubenswrapper[4852]: I0129 11:15:04.291008 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft"] Jan 29 11:15:04 crc kubenswrapper[4852]: I0129 11:15:04.298235 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494710-sztft"] Jan 29 11:15:05 crc kubenswrapper[4852]: I0129 11:15:05.491429 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08e29585-8b41-4ac1-94f2-38a45107f4b9" path="/var/lib/kubelet/pods/08e29585-8b41-4ac1-94f2-38a45107f4b9/volumes" Jan 29 11:15:30 crc kubenswrapper[4852]: I0129 11:15:30.016807 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:15:30 crc kubenswrapper[4852]: I0129 11:15:30.017519 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:15:47 crc kubenswrapper[4852]: I0129 11:15:47.907124 4852 scope.go:117] "RemoveContainer" containerID="89b9c3b22ac43d6002d1a19dc8d2a4c2be4866f79255e4463dc428c0c9c0ecd1" Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.017381 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.018079 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.018164 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.019462 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ea92e7a4626eb3b5a4b59d32752972a17641b8f91711837a4e54ca0ac4e1a781"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.019689 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://ea92e7a4626eb3b5a4b59d32752972a17641b8f91711837a4e54ca0ac4e1a781" gracePeriod=600 Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.340040 4852 generic.go:334] "Generic (PLEG): 
container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="ea92e7a4626eb3b5a4b59d32752972a17641b8f91711837a4e54ca0ac4e1a781" exitCode=0 Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.340107 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"ea92e7a4626eb3b5a4b59d32752972a17641b8f91711837a4e54ca0ac4e1a781"} Jan 29 11:16:00 crc kubenswrapper[4852]: I0129 11:16:00.340480 4852 scope.go:117] "RemoveContainer" containerID="09953164a9582d02290c7d31a0f8f67b0169e131c9327eb8dfb532df29f7a91f" Jan 29 11:16:01 crc kubenswrapper[4852]: I0129 11:16:01.351967 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f"} Jan 29 11:18:00 crc kubenswrapper[4852]: I0129 11:18:00.017164 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:18:00 crc kubenswrapper[4852]: I0129 11:18:00.017902 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:18:10 crc kubenswrapper[4852]: I0129 11:18:10.982262 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qs5dp"] Jan 29 11:18:10 crc kubenswrapper[4852]: E0129 11:18:10.983268 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" containerName="collect-profiles" Jan 29 11:18:10 crc kubenswrapper[4852]: I0129 11:18:10.983288 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" containerName="collect-profiles" Jan 29 11:18:10 crc kubenswrapper[4852]: I0129 11:18:10.983497 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" containerName="collect-profiles" Jan 29 11:18:10 crc kubenswrapper[4852]: I0129 11:18:10.984881 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.015988 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qs5dp"] Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.028852 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bm7l\" (UniqueName: \"kubernetes.io/projected/d453294b-ae30-43cd-b234-9a914d3dfd99-kube-api-access-7bm7l\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.028912 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-catalog-content\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.028939 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-utilities\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.129675 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bm7l\" (UniqueName: \"kubernetes.io/projected/d453294b-ae30-43cd-b234-9a914d3dfd99-kube-api-access-7bm7l\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.129767 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-catalog-content\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.129798 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-utilities\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.130370 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-catalog-content\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.130411 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-utilities\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.149157 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7bm7l\" (UniqueName: \"kubernetes.io/projected/d453294b-ae30-43cd-b234-9a914d3dfd99-kube-api-access-7bm7l\") pod \"certified-operators-qs5dp\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.317245 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:11 crc kubenswrapper[4852]: I0129 11:18:11.813219 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qs5dp"] Jan 29 11:18:12 crc kubenswrapper[4852]: I0129 11:18:12.375510 4852 generic.go:334] "Generic (PLEG): container finished" podID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerID="c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08" exitCode=0 Jan 29 11:18:12 crc kubenswrapper[4852]: I0129 11:18:12.375629 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerDied","Data":"c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08"} Jan 29 11:18:12 crc kubenswrapper[4852]: I0129 11:18:12.375839 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerStarted","Data":"d0ca8e42a247163fb287eaa9953597ceb82f6467b3291eacee849aec31a5f197"} Jan 29 11:18:12 crc kubenswrapper[4852]: I0129 11:18:12.378486 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:18:13 crc kubenswrapper[4852]: I0129 11:18:13.384174 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerStarted","Data":"fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70"} Jan 29 11:18:14 crc kubenswrapper[4852]: I0129 11:18:14.396725 4852 generic.go:334] "Generic (PLEG): container finished" podID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerID="fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70" exitCode=0 Jan 29 11:18:14 crc kubenswrapper[4852]: I0129 11:18:14.396991 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerDied","Data":"fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70"} Jan 29 11:18:16 crc kubenswrapper[4852]: I0129 11:18:16.418055 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerStarted","Data":"dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae"} Jan 29 11:18:16 crc kubenswrapper[4852]: I0129 11:18:16.439658 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qs5dp" podStartSLOduration=3.378745443 podStartE2EDuration="6.43963766s" podCreationTimestamp="2026-01-29 11:18:10 +0000 UTC" firstStartedPulling="2026-01-29 11:18:12.378227784 +0000 UTC m=+2189.595558918" lastFinishedPulling="2026-01-29 11:18:15.439119971 +0000 UTC m=+2192.656451135" observedRunningTime="2026-01-29 11:18:16.438083351 +0000 UTC m=+2193.655414495" watchObservedRunningTime="2026-01-29 
11:18:16.43963766 +0000 UTC m=+2193.656968804" Jan 29 11:18:21 crc kubenswrapper[4852]: I0129 11:18:21.317758 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:21 crc kubenswrapper[4852]: I0129 11:18:21.318864 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:21 crc kubenswrapper[4852]: I0129 11:18:21.370339 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:21 crc kubenswrapper[4852]: I0129 11:18:21.496975 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:21 crc kubenswrapper[4852]: I0129 11:18:21.610443 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qs5dp"] Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.476572 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qs5dp" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="registry-server" containerID="cri-o://dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae" gracePeriod=2 Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.846256 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.953119 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-utilities\") pod \"d453294b-ae30-43cd-b234-9a914d3dfd99\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.953189 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-catalog-content\") pod \"d453294b-ae30-43cd-b234-9a914d3dfd99\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.953272 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bm7l\" (UniqueName: \"kubernetes.io/projected/d453294b-ae30-43cd-b234-9a914d3dfd99-kube-api-access-7bm7l\") pod \"d453294b-ae30-43cd-b234-9a914d3dfd99\" (UID: \"d453294b-ae30-43cd-b234-9a914d3dfd99\") " Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.954099 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-utilities" (OuterVolumeSpecName: "utilities") pod "d453294b-ae30-43cd-b234-9a914d3dfd99" (UID: "d453294b-ae30-43cd-b234-9a914d3dfd99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:18:23 crc kubenswrapper[4852]: I0129 11:18:23.960346 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d453294b-ae30-43cd-b234-9a914d3dfd99-kube-api-access-7bm7l" (OuterVolumeSpecName: "kube-api-access-7bm7l") pod "d453294b-ae30-43cd-b234-9a914d3dfd99" (UID: "d453294b-ae30-43cd-b234-9a914d3dfd99"). InnerVolumeSpecName "kube-api-access-7bm7l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.018649 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d453294b-ae30-43cd-b234-9a914d3dfd99" (UID: "d453294b-ae30-43cd-b234-9a914d3dfd99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.055432 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bm7l\" (UniqueName: \"kubernetes.io/projected/d453294b-ae30-43cd-b234-9a914d3dfd99-kube-api-access-7bm7l\") on node \"crc\" DevicePath \"\"" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.055474 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.055486 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d453294b-ae30-43cd-b234-9a914d3dfd99-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.489167 4852 generic.go:334] "Generic (PLEG): container finished" podID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerID="dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae" exitCode=0 Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.489209 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerDied","Data":"dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae"} Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.489250 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qs5dp" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.489277 4852 scope.go:117] "RemoveContainer" containerID="dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.489265 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qs5dp" event={"ID":"d453294b-ae30-43cd-b234-9a914d3dfd99","Type":"ContainerDied","Data":"d0ca8e42a247163fb287eaa9953597ceb82f6467b3291eacee849aec31a5f197"} Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.509366 4852 scope.go:117] "RemoveContainer" containerID="fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.524165 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qs5dp"] Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.530058 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qs5dp"] Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.552116 4852 scope.go:117] "RemoveContainer" containerID="c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.568669 4852 scope.go:117] "RemoveContainer" containerID="dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae" Jan 29 11:18:24 crc kubenswrapper[4852]: E0129 11:18:24.568937 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae\": container with ID starting with dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae not found: ID does not exist" containerID="dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.568970 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae"} err="failed to get container status \"dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae\": rpc error: code = NotFound desc = could not find container \"dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae\": container with ID starting with dfdf2d832152674d930c4c0559fba3d7960cc2298d8f637472437291d2563fae not found: ID does not exist" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.568993 4852 scope.go:117] "RemoveContainer" containerID="fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70" Jan 29 11:18:24 crc kubenswrapper[4852]: E0129 11:18:24.569169 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70\": container with ID starting with fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70 not found: ID does not exist" containerID="fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.569190 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70"} err="failed to get container status \"fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70\": rpc error: code = NotFound desc = could not find 
container \"fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70\": container with ID starting with fa6585a620023d6a592fb08e13615ffa3698c1ccfac878ab138c9b6697335c70 not found: ID does not exist" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.569206 4852 scope.go:117] "RemoveContainer" containerID="c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08" Jan 29 11:18:24 crc kubenswrapper[4852]: E0129 11:18:24.569442 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08\": container with ID starting with c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08 not found: ID does not exist" containerID="c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08" Jan 29 11:18:24 crc kubenswrapper[4852]: I0129 11:18:24.569471 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08"} err="failed to get container status \"c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08\": rpc error: code = NotFound desc = could not find container \"c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08\": container with ID starting with c65319dc9932aa746f4fbb4ebd79f20e1cd4d240334a8f58bdd5ee181c994a08 not found: ID does not exist" Jan 29 11:18:25 crc kubenswrapper[4852]: I0129 11:18:25.478142 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" path="/var/lib/kubelet/pods/d453294b-ae30-43cd-b234-9a914d3dfd99/volumes" Jan 29 11:18:30 crc kubenswrapper[4852]: I0129 11:18:30.017253 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:18:30 crc kubenswrapper[4852]: I0129 11:18:30.018056 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.017092 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.018033 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.018137 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.018965 4852 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.019054 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" gracePeriod=600 Jan 29 11:19:00 crc kubenswrapper[4852]: E0129 11:19:00.150547 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.815881 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" exitCode=0 Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.815908 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f"} Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.815963 4852 scope.go:117] "RemoveContainer" containerID="ea92e7a4626eb3b5a4b59d32752972a17641b8f91711837a4e54ca0ac4e1a781" Jan 29 11:19:00 crc kubenswrapper[4852]: I0129 11:19:00.816633 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:19:00 crc kubenswrapper[4852]: E0129 11:19:00.816939 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.414209 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-klbvq"] Jan 29 11:19:07 crc kubenswrapper[4852]: E0129 11:19:07.415202 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="extract-content" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.415221 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="extract-content" Jan 29 11:19:07 crc kubenswrapper[4852]: E0129 11:19:07.415242 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="extract-utilities" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.415251 4852 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="extract-utilities" Jan 29 11:19:07 crc kubenswrapper[4852]: E0129 11:19:07.415266 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="registry-server" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.415276 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="registry-server" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.415444 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d453294b-ae30-43cd-b234-9a914d3dfd99" containerName="registry-server" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.416744 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.421911 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-klbvq"] Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.551023 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-catalog-content\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.551116 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqx74\" (UniqueName: \"kubernetes.io/projected/f9f43371-9a83-487e-bf01-757cea9fbad5-kube-api-access-dqx74\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.551477 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-utilities\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.652565 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-utilities\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.652668 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-catalog-content\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.652699 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqx74\" (UniqueName: \"kubernetes.io/projected/f9f43371-9a83-487e-bf01-757cea9fbad5-kube-api-access-dqx74\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.653067 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-utilities\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.653106 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-catalog-content\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.673241 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqx74\" (UniqueName: \"kubernetes.io/projected/f9f43371-9a83-487e-bf01-757cea9fbad5-kube-api-access-dqx74\") pod \"community-operators-klbvq\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:07 crc kubenswrapper[4852]: I0129 11:19:07.736984 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:08 crc kubenswrapper[4852]: I0129 11:19:08.272460 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-klbvq"] Jan 29 11:19:08 crc kubenswrapper[4852]: I0129 11:19:08.879094 4852 generic.go:334] "Generic (PLEG): container finished" podID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerID="7e000a4dbd87153fe64414e9401ab279bc60d54a94dfa5fb7ee0ed905888b826" exitCode=0 Jan 29 11:19:08 crc kubenswrapper[4852]: I0129 11:19:08.879168 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-klbvq" event={"ID":"f9f43371-9a83-487e-bf01-757cea9fbad5","Type":"ContainerDied","Data":"7e000a4dbd87153fe64414e9401ab279bc60d54a94dfa5fb7ee0ed905888b826"} Jan 29 11:19:08 crc kubenswrapper[4852]: I0129 11:19:08.879733 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-klbvq" event={"ID":"f9f43371-9a83-487e-bf01-757cea9fbad5","Type":"ContainerStarted","Data":"c7d9c4caa868a2a6363d4ffee82ab3ed332c6780f5bb7d9466d84d604ca90fd7"} Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.011254 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pbs7n"] Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.014189 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.022947 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbs7n"] Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.088850 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k8jw\" (UniqueName: \"kubernetes.io/projected/4a846fa1-09ca-40dd-a667-39abdcdfaf84-kube-api-access-4k8jw\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.088928 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-catalog-content\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.089251 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-utilities\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.190655 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-catalog-content\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.190787 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-utilities\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.190833 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k8jw\" (UniqueName: \"kubernetes.io/projected/4a846fa1-09ca-40dd-a667-39abdcdfaf84-kube-api-access-4k8jw\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.191653 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-catalog-content\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.191773 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-utilities\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.221357 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4k8jw\" (UniqueName: \"kubernetes.io/projected/4a846fa1-09ca-40dd-a667-39abdcdfaf84-kube-api-access-4k8jw\") pod \"redhat-marketplace-pbs7n\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.337778 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.856327 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbs7n"] Jan 29 11:19:10 crc kubenswrapper[4852]: I0129 11:19:10.897846 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbs7n" event={"ID":"4a846fa1-09ca-40dd-a667-39abdcdfaf84","Type":"ContainerStarted","Data":"e50de6a78c18d88571361bf42213cb965f4a58baea250af8c7466f76ebe86892"} Jan 29 11:19:11 crc kubenswrapper[4852]: I0129 11:19:11.908298 4852 generic.go:334] "Generic (PLEG): container finished" podID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerID="00d15844a0a54c0622245b5a6d73d9b71b266c848ec4fde43f21206a710f0d77" exitCode=0 Jan 29 11:19:11 crc kubenswrapper[4852]: I0129 11:19:11.908381 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-klbvq" event={"ID":"f9f43371-9a83-487e-bf01-757cea9fbad5","Type":"ContainerDied","Data":"00d15844a0a54c0622245b5a6d73d9b71b266c848ec4fde43f21206a710f0d77"} Jan 29 11:19:11 crc kubenswrapper[4852]: I0129 11:19:11.911117 4852 generic.go:334] "Generic (PLEG): container finished" podID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerID="78e235e2431ca4c233782732ee99b3fa421bc956436ec669d1814e22f8b37d77" exitCode=0 Jan 29 11:19:11 crc kubenswrapper[4852]: I0129 11:19:11.911158 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbs7n" event={"ID":"4a846fa1-09ca-40dd-a667-39abdcdfaf84","Type":"ContainerDied","Data":"78e235e2431ca4c233782732ee99b3fa421bc956436ec669d1814e22f8b37d77"} Jan 29 11:19:12 crc kubenswrapper[4852]: I0129 11:19:12.463928 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:19:12 crc kubenswrapper[4852]: E0129 11:19:12.464138 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:19:13 crc kubenswrapper[4852]: I0129 11:19:13.937279 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-klbvq" event={"ID":"f9f43371-9a83-487e-bf01-757cea9fbad5","Type":"ContainerStarted","Data":"6ef85dc19b0af961910a662fdc72e9d1ea6e268470b95612bf3f360c544eb12c"} Jan 29 11:19:13 crc kubenswrapper[4852]: I0129 11:19:13.968374 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-klbvq" podStartSLOduration=2.4610599029999998 podStartE2EDuration="6.968348624s" podCreationTimestamp="2026-01-29 11:19:07 +0000 UTC" firstStartedPulling="2026-01-29 11:19:08.881005619 +0000 UTC m=+2246.098336793" lastFinishedPulling="2026-01-29 
11:19:13.38829433 +0000 UTC m=+2250.605625514" observedRunningTime="2026-01-29 11:19:13.962046511 +0000 UTC m=+2251.179377675" watchObservedRunningTime="2026-01-29 11:19:13.968348624 +0000 UTC m=+2251.185679758" Jan 29 11:19:14 crc kubenswrapper[4852]: I0129 11:19:14.948154 4852 generic.go:334] "Generic (PLEG): container finished" podID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerID="68a7082a0cab1023a08961c8024f1b92d39b9147ad9144cf729194c5f3dd698d" exitCode=0 Jan 29 11:19:14 crc kubenswrapper[4852]: I0129 11:19:14.948207 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbs7n" event={"ID":"4a846fa1-09ca-40dd-a667-39abdcdfaf84","Type":"ContainerDied","Data":"68a7082a0cab1023a08961c8024f1b92d39b9147ad9144cf729194c5f3dd698d"} Jan 29 11:19:16 crc kubenswrapper[4852]: I0129 11:19:16.967752 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbs7n" event={"ID":"4a846fa1-09ca-40dd-a667-39abdcdfaf84","Type":"ContainerStarted","Data":"b6c4170cfd79360230ef4b273b89a25b4f55a5f9209cc4481fff7602cac7813c"} Jan 29 11:19:17 crc kubenswrapper[4852]: I0129 11:19:17.002613 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pbs7n" podStartSLOduration=4.095110634 podStartE2EDuration="8.002556018s" podCreationTimestamp="2026-01-29 11:19:09 +0000 UTC" firstStartedPulling="2026-01-29 11:19:11.912420905 +0000 UTC m=+2249.129752049" lastFinishedPulling="2026-01-29 11:19:15.819866259 +0000 UTC m=+2253.037197433" observedRunningTime="2026-01-29 11:19:16.991373025 +0000 UTC m=+2254.208704199" watchObservedRunningTime="2026-01-29 11:19:17.002556018 +0000 UTC m=+2254.219887192" Jan 29 11:19:17 crc kubenswrapper[4852]: I0129 11:19:17.738135 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:17 crc kubenswrapper[4852]: I0129 11:19:17.738209 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:17 crc kubenswrapper[4852]: I0129 11:19:17.786485 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:20 crc kubenswrapper[4852]: I0129 11:19:20.338744 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:20 crc kubenswrapper[4852]: I0129 11:19:20.339277 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:20 crc kubenswrapper[4852]: I0129 11:19:20.397376 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:21 crc kubenswrapper[4852]: I0129 11:19:21.071144 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:21 crc kubenswrapper[4852]: I0129 11:19:21.999337 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbs7n"] Jan 29 11:19:23 crc kubenswrapper[4852]: I0129 11:19:23.021562 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pbs7n" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="registry-server" 
containerID="cri-o://b6c4170cfd79360230ef4b273b89a25b4f55a5f9209cc4481fff7602cac7813c" gracePeriod=2 Jan 29 11:19:23 crc kubenswrapper[4852]: I0129 11:19:23.472126 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:19:23 crc kubenswrapper[4852]: E0129 11:19:23.472554 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.046576 4852 generic.go:334] "Generic (PLEG): container finished" podID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerID="b6c4170cfd79360230ef4b273b89a25b4f55a5f9209cc4481fff7602cac7813c" exitCode=0 Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.046750 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbs7n" event={"ID":"4a846fa1-09ca-40dd-a667-39abdcdfaf84","Type":"ContainerDied","Data":"b6c4170cfd79360230ef4b273b89a25b4f55a5f9209cc4481fff7602cac7813c"} Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.665938 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.832444 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-catalog-content\") pod \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.832828 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4k8jw\" (UniqueName: \"kubernetes.io/projected/4a846fa1-09ca-40dd-a667-39abdcdfaf84-kube-api-access-4k8jw\") pod \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.832856 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-utilities\") pod \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\" (UID: \"4a846fa1-09ca-40dd-a667-39abdcdfaf84\") " Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.834134 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-utilities" (OuterVolumeSpecName: "utilities") pod "4a846fa1-09ca-40dd-a667-39abdcdfaf84" (UID: "4a846fa1-09ca-40dd-a667-39abdcdfaf84"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.845827 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a846fa1-09ca-40dd-a667-39abdcdfaf84-kube-api-access-4k8jw" (OuterVolumeSpecName: "kube-api-access-4k8jw") pod "4a846fa1-09ca-40dd-a667-39abdcdfaf84" (UID: "4a846fa1-09ca-40dd-a667-39abdcdfaf84"). InnerVolumeSpecName "kube-api-access-4k8jw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.881716 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a846fa1-09ca-40dd-a667-39abdcdfaf84" (UID: "4a846fa1-09ca-40dd-a667-39abdcdfaf84"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.934464 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.934504 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4k8jw\" (UniqueName: \"kubernetes.io/projected/4a846fa1-09ca-40dd-a667-39abdcdfaf84-kube-api-access-4k8jw\") on node \"crc\" DevicePath \"\"" Jan 29 11:19:25 crc kubenswrapper[4852]: I0129 11:19:25.934521 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a846fa1-09ca-40dd-a667-39abdcdfaf84-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.057404 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pbs7n" event={"ID":"4a846fa1-09ca-40dd-a667-39abdcdfaf84","Type":"ContainerDied","Data":"e50de6a78c18d88571361bf42213cb965f4a58baea250af8c7466f76ebe86892"} Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.057474 4852 scope.go:117] "RemoveContainer" containerID="b6c4170cfd79360230ef4b273b89a25b4f55a5f9209cc4481fff7602cac7813c" Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.057553 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pbs7n" Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.090109 4852 scope.go:117] "RemoveContainer" containerID="68a7082a0cab1023a08961c8024f1b92d39b9147ad9144cf729194c5f3dd698d" Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.116564 4852 scope.go:117] "RemoveContainer" containerID="78e235e2431ca4c233782732ee99b3fa421bc956436ec669d1814e22f8b37d77" Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.126752 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbs7n"] Jan 29 11:19:26 crc kubenswrapper[4852]: I0129 11:19:26.143132 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pbs7n"] Jan 29 11:19:27 crc kubenswrapper[4852]: I0129 11:19:27.473528 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" path="/var/lib/kubelet/pods/4a846fa1-09ca-40dd-a667-39abdcdfaf84/volumes" Jan 29 11:19:27 crc kubenswrapper[4852]: I0129 11:19:27.794224 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:28 crc kubenswrapper[4852]: I0129 11:19:28.598855 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-klbvq"] Jan 29 11:19:28 crc kubenswrapper[4852]: I0129 11:19:28.599383 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-klbvq" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="registry-server" containerID="cri-o://6ef85dc19b0af961910a662fdc72e9d1ea6e268470b95612bf3f360c544eb12c" gracePeriod=2 Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.088255 4852 generic.go:334] "Generic (PLEG): container finished" podID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerID="6ef85dc19b0af961910a662fdc72e9d1ea6e268470b95612bf3f360c544eb12c" exitCode=0 Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.088327 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-klbvq" event={"ID":"f9f43371-9a83-487e-bf01-757cea9fbad5","Type":"ContainerDied","Data":"6ef85dc19b0af961910a662fdc72e9d1ea6e268470b95612bf3f360c544eb12c"} Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.648468 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.803465 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-catalog-content\") pod \"f9f43371-9a83-487e-bf01-757cea9fbad5\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.803652 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqx74\" (UniqueName: \"kubernetes.io/projected/f9f43371-9a83-487e-bf01-757cea9fbad5-kube-api-access-dqx74\") pod \"f9f43371-9a83-487e-bf01-757cea9fbad5\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.803748 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-utilities\") pod \"f9f43371-9a83-487e-bf01-757cea9fbad5\" (UID: \"f9f43371-9a83-487e-bf01-757cea9fbad5\") " Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.805102 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-utilities" (OuterVolumeSpecName: "utilities") pod "f9f43371-9a83-487e-bf01-757cea9fbad5" (UID: "f9f43371-9a83-487e-bf01-757cea9fbad5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.814010 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9f43371-9a83-487e-bf01-757cea9fbad5-kube-api-access-dqx74" (OuterVolumeSpecName: "kube-api-access-dqx74") pod "f9f43371-9a83-487e-bf01-757cea9fbad5" (UID: "f9f43371-9a83-487e-bf01-757cea9fbad5"). InnerVolumeSpecName "kube-api-access-dqx74". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.868194 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9f43371-9a83-487e-bf01-757cea9fbad5" (UID: "f9f43371-9a83-487e-bf01-757cea9fbad5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.905713 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.905746 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f43371-9a83-487e-bf01-757cea9fbad5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:19:29 crc kubenswrapper[4852]: I0129 11:19:29.905760 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqx74\" (UniqueName: \"kubernetes.io/projected/f9f43371-9a83-487e-bf01-757cea9fbad5-kube-api-access-dqx74\") on node \"crc\" DevicePath \"\"" Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.097208 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-klbvq" event={"ID":"f9f43371-9a83-487e-bf01-757cea9fbad5","Type":"ContainerDied","Data":"c7d9c4caa868a2a6363d4ffee82ab3ed332c6780f5bb7d9466d84d604ca90fd7"} Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.097268 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-klbvq" Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.097282 4852 scope.go:117] "RemoveContainer" containerID="6ef85dc19b0af961910a662fdc72e9d1ea6e268470b95612bf3f360c544eb12c" Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.117948 4852 scope.go:117] "RemoveContainer" containerID="00d15844a0a54c0622245b5a6d73d9b71b266c848ec4fde43f21206a710f0d77" Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.143214 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-klbvq"] Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.153639 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-klbvq"] Jan 29 11:19:30 crc kubenswrapper[4852]: I0129 11:19:30.165815 4852 scope.go:117] "RemoveContainer" containerID="7e000a4dbd87153fe64414e9401ab279bc60d54a94dfa5fb7ee0ed905888b826" Jan 29 11:19:31 crc kubenswrapper[4852]: I0129 11:19:31.477700 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" path="/var/lib/kubelet/pods/f9f43371-9a83-487e-bf01-757cea9fbad5/volumes" Jan 29 11:19:37 crc kubenswrapper[4852]: I0129 11:19:37.463770 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:19:37 crc kubenswrapper[4852]: E0129 11:19:37.464495 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:19:49 crc kubenswrapper[4852]: I0129 11:19:49.462980 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:19:49 crc kubenswrapper[4852]: E0129 11:19:49.463554 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:20:00 crc kubenswrapper[4852]: I0129 11:20:00.463923 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:20:00 crc kubenswrapper[4852]: E0129 11:20:00.464758 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:20:12 crc kubenswrapper[4852]: I0129 11:20:12.464781 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:20:12 crc kubenswrapper[4852]: E0129 11:20:12.466242 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:20:25 crc kubenswrapper[4852]: I0129 11:20:25.464651 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:20:25 crc kubenswrapper[4852]: E0129 11:20:25.465856 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:20:36 crc kubenswrapper[4852]: I0129 11:20:36.463010 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:20:36 crc kubenswrapper[4852]: E0129 11:20:36.463646 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:20:51 crc kubenswrapper[4852]: I0129 11:20:51.464555 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:20:51 crc kubenswrapper[4852]: E0129 11:20:51.465904 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:21:02 crc kubenswrapper[4852]: I0129 11:21:02.464139 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:21:02 crc kubenswrapper[4852]: E0129 11:21:02.465058 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:21:17 crc kubenswrapper[4852]: I0129 11:21:17.464132 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:21:17 crc kubenswrapper[4852]: E0129 11:21:17.464745 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:21:29 crc kubenswrapper[4852]: I0129 11:21:29.468365 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:21:29 crc kubenswrapper[4852]: E0129 11:21:29.469522 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:21:41 crc kubenswrapper[4852]: I0129 11:21:41.464369 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:21:41 crc kubenswrapper[4852]: E0129 11:21:41.465073 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:21:52 crc kubenswrapper[4852]: I0129 11:21:52.463307 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:21:52 crc kubenswrapper[4852]: E0129 11:21:52.464134 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:22:04 crc kubenswrapper[4852]: I0129 11:22:04.463616 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:22:04 crc kubenswrapper[4852]: E0129 11:22:04.464440 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:22:16 crc kubenswrapper[4852]: I0129 11:22:16.463424 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:22:16 crc kubenswrapper[4852]: E0129 11:22:16.464279 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:22:28 crc kubenswrapper[4852]: I0129 11:22:28.464164 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:22:28 crc kubenswrapper[4852]: E0129 11:22:28.465163 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:22:42 crc kubenswrapper[4852]: I0129 11:22:42.463970 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:22:42 crc kubenswrapper[4852]: E0129 11:22:42.464763 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:22:57 crc kubenswrapper[4852]: I0129 11:22:57.463789 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:22:57 crc kubenswrapper[4852]: E0129 11:22:57.464536 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:23:12 crc kubenswrapper[4852]: I0129 11:23:12.463468 4852 scope.go:117] "RemoveContainer" 
containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:23:12 crc kubenswrapper[4852]: E0129 11:23:12.464205 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:23:27 crc kubenswrapper[4852]: I0129 11:23:27.463512 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:23:27 crc kubenswrapper[4852]: E0129 11:23:27.464255 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:23:38 crc kubenswrapper[4852]: I0129 11:23:38.463614 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:23:38 crc kubenswrapper[4852]: E0129 11:23:38.464641 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:23:50 crc kubenswrapper[4852]: I0129 11:23:50.463644 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:23:50 crc kubenswrapper[4852]: E0129 11:23:50.464369 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:24:05 crc kubenswrapper[4852]: I0129 11:24:05.464047 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:24:06 crc kubenswrapper[4852]: I0129 11:24:06.381331 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"1d59f6a3d6767b7ffed48c6db1d26df18e1816714b7968fe3262eacd233ba70f"} Jan 29 11:26:30 crc kubenswrapper[4852]: I0129 11:26:30.016692 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:26:30 crc kubenswrapper[4852]: I0129 11:26:30.017203 4852 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:27:00 crc kubenswrapper[4852]: I0129 11:27:00.017280 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:27:00 crc kubenswrapper[4852]: I0129 11:27:00.018755 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:27:30 crc kubenswrapper[4852]: I0129 11:27:30.017044 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:27:30 crc kubenswrapper[4852]: I0129 11:27:30.017901 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:27:30 crc kubenswrapper[4852]: I0129 11:27:30.017971 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:27:30 crc kubenswrapper[4852]: I0129 11:27:30.019010 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1d59f6a3d6767b7ffed48c6db1d26df18e1816714b7968fe3262eacd233ba70f"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:27:30 crc kubenswrapper[4852]: I0129 11:27:30.019144 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://1d59f6a3d6767b7ffed48c6db1d26df18e1816714b7968fe3262eacd233ba70f" gracePeriod=600 Jan 29 11:27:31 crc kubenswrapper[4852]: I0129 11:27:31.095909 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="1d59f6a3d6767b7ffed48c6db1d26df18e1816714b7968fe3262eacd233ba70f" exitCode=0 Jan 29 11:27:31 crc kubenswrapper[4852]: I0129 11:27:31.096196 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"1d59f6a3d6767b7ffed48c6db1d26df18e1816714b7968fe3262eacd233ba70f"} Jan 29 11:27:31 crc kubenswrapper[4852]: I0129 11:27:31.096219 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64"} Jan 29 11:27:31 crc kubenswrapper[4852]: I0129 11:27:31.096234 4852 scope.go:117] "RemoveContainer" containerID="702464b89675bc19fb956e9e165a723f4404fe90e9f7ca3a5f57bca8cdfe013f" Jan 29 11:29:30 crc kubenswrapper[4852]: I0129 11:29:30.017393 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:29:30 crc kubenswrapper[4852]: I0129 11:29:30.018008 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.017210 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.017967 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.173776 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb"] Jan 29 11:30:00 crc kubenswrapper[4852]: E0129 11:30:00.174260 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="extract-utilities" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174293 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="extract-utilities" Jan 29 11:30:00 crc kubenswrapper[4852]: E0129 11:30:00.174318 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="registry-server" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174331 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="registry-server" Jan 29 11:30:00 crc kubenswrapper[4852]: E0129 11:30:00.174351 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="extract-utilities" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174363 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="extract-utilities" Jan 29 11:30:00 crc kubenswrapper[4852]: E0129 11:30:00.174380 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="registry-server" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174394 
4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="registry-server" Jan 29 11:30:00 crc kubenswrapper[4852]: E0129 11:30:00.174416 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="extract-content" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174428 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="extract-content" Jan 29 11:30:00 crc kubenswrapper[4852]: E0129 11:30:00.174448 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="extract-content" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174460 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="extract-content" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174776 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9f43371-9a83-487e-bf01-757cea9fbad5" containerName="registry-server" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.174816 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a846fa1-09ca-40dd-a667-39abdcdfaf84" containerName="registry-server" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.175766 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.179216 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.180050 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.201185 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb"] Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.298021 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a0fd4c7-2f4b-482d-8ce6-652785813e37-config-volume\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.298098 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a0fd4c7-2f4b-482d-8ce6-652785813e37-secret-volume\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.298231 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhz4r\" (UniqueName: \"kubernetes.io/projected/4a0fd4c7-2f4b-482d-8ce6-652785813e37-kube-api-access-mhz4r\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.399913 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a0fd4c7-2f4b-482d-8ce6-652785813e37-config-volume\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.399976 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a0fd4c7-2f4b-482d-8ce6-652785813e37-secret-volume\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.400048 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhz4r\" (UniqueName: \"kubernetes.io/projected/4a0fd4c7-2f4b-482d-8ce6-652785813e37-kube-api-access-mhz4r\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.402409 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a0fd4c7-2f4b-482d-8ce6-652785813e37-config-volume\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.410149 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a0fd4c7-2f4b-482d-8ce6-652785813e37-secret-volume\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.425544 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhz4r\" (UniqueName: \"kubernetes.io/projected/4a0fd4c7-2f4b-482d-8ce6-652785813e37-kube-api-access-mhz4r\") pod \"collect-profiles-29494770-rgvbb\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.512716 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:00 crc kubenswrapper[4852]: I0129 11:30:00.765108 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb"] Jan 29 11:30:01 crc kubenswrapper[4852]: I0129 11:30:01.295676 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" event={"ID":"4a0fd4c7-2f4b-482d-8ce6-652785813e37","Type":"ContainerStarted","Data":"baf1bc54c8dacac205c5821c18da0d26bfd351c4c0978a7464cf00035aca6487"} Jan 29 11:30:01 crc kubenswrapper[4852]: I0129 11:30:01.295720 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" event={"ID":"4a0fd4c7-2f4b-482d-8ce6-652785813e37","Type":"ContainerStarted","Data":"bbb3de3c43c9344ff9fb208ea9927fb4bcc09f7b15307d42bcdc33258d13f017"} Jan 29 11:30:01 crc kubenswrapper[4852]: I0129 11:30:01.323317 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" podStartSLOduration=1.323291038 podStartE2EDuration="1.323291038s" podCreationTimestamp="2026-01-29 11:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:30:01.318714398 +0000 UTC m=+2898.536045532" watchObservedRunningTime="2026-01-29 11:30:01.323291038 +0000 UTC m=+2898.540622202" Jan 29 11:30:02 crc kubenswrapper[4852]: I0129 11:30:02.307925 4852 generic.go:334] "Generic (PLEG): container finished" podID="4a0fd4c7-2f4b-482d-8ce6-652785813e37" containerID="baf1bc54c8dacac205c5821c18da0d26bfd351c4c0978a7464cf00035aca6487" exitCode=0 Jan 29 11:30:02 crc kubenswrapper[4852]: I0129 11:30:02.308035 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" event={"ID":"4a0fd4c7-2f4b-482d-8ce6-652785813e37","Type":"ContainerDied","Data":"baf1bc54c8dacac205c5821c18da0d26bfd351c4c0978a7464cf00035aca6487"} Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.610617 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.750408 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a0fd4c7-2f4b-482d-8ce6-652785813e37-config-volume\") pod \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.750508 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a0fd4c7-2f4b-482d-8ce6-652785813e37-secret-volume\") pod \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.750617 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhz4r\" (UniqueName: \"kubernetes.io/projected/4a0fd4c7-2f4b-482d-8ce6-652785813e37-kube-api-access-mhz4r\") pod \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\" (UID: \"4a0fd4c7-2f4b-482d-8ce6-652785813e37\") " Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.751513 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a0fd4c7-2f4b-482d-8ce6-652785813e37-config-volume" (OuterVolumeSpecName: "config-volume") pod "4a0fd4c7-2f4b-482d-8ce6-652785813e37" (UID: "4a0fd4c7-2f4b-482d-8ce6-652785813e37"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.757506 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a0fd4c7-2f4b-482d-8ce6-652785813e37-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4a0fd4c7-2f4b-482d-8ce6-652785813e37" (UID: "4a0fd4c7-2f4b-482d-8ce6-652785813e37"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.757556 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a0fd4c7-2f4b-482d-8ce6-652785813e37-kube-api-access-mhz4r" (OuterVolumeSpecName: "kube-api-access-mhz4r") pod "4a0fd4c7-2f4b-482d-8ce6-652785813e37" (UID: "4a0fd4c7-2f4b-482d-8ce6-652785813e37"). InnerVolumeSpecName "kube-api-access-mhz4r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.852507 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a0fd4c7-2f4b-482d-8ce6-652785813e37-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.852547 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a0fd4c7-2f4b-482d-8ce6-652785813e37-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:03 crc kubenswrapper[4852]: I0129 11:30:03.852561 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhz4r\" (UniqueName: \"kubernetes.io/projected/4a0fd4c7-2f4b-482d-8ce6-652785813e37-kube-api-access-mhz4r\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:04 crc kubenswrapper[4852]: I0129 11:30:04.328281 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" event={"ID":"4a0fd4c7-2f4b-482d-8ce6-652785813e37","Type":"ContainerDied","Data":"bbb3de3c43c9344ff9fb208ea9927fb4bcc09f7b15307d42bcdc33258d13f017"} Jan 29 11:30:04 crc kubenswrapper[4852]: I0129 11:30:04.328325 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbb3de3c43c9344ff9fb208ea9927fb4bcc09f7b15307d42bcdc33258d13f017" Jan 29 11:30:04 crc kubenswrapper[4852]: I0129 11:30:04.328375 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb" Jan 29 11:30:04 crc kubenswrapper[4852]: I0129 11:30:04.395360 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc"] Jan 29 11:30:04 crc kubenswrapper[4852]: I0129 11:30:04.400820 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494725-n4kzc"] Jan 29 11:30:05 crc kubenswrapper[4852]: I0129 11:30:05.585251 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6996b7b-e91f-4806-875c-b579f9aa9211" path="/var/lib/kubelet/pods/c6996b7b-e91f-4806-875c-b579f9aa9211/volumes" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.920668 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s8kw5"] Jan 29 11:30:15 crc kubenswrapper[4852]: E0129 11:30:15.921724 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a0fd4c7-2f4b-482d-8ce6-652785813e37" containerName="collect-profiles" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.921745 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a0fd4c7-2f4b-482d-8ce6-652785813e37" containerName="collect-profiles" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.922014 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a0fd4c7-2f4b-482d-8ce6-652785813e37" containerName="collect-profiles" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.923723 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.935228 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s8kw5"] Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.948797 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sjsv\" (UniqueName: \"kubernetes.io/projected/13be012a-514b-4237-87da-79286f5977f2-kube-api-access-8sjsv\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.948883 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-catalog-content\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:15 crc kubenswrapper[4852]: I0129 11:30:15.948924 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-utilities\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.049689 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-catalog-content\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.049752 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-utilities\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.049806 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sjsv\" (UniqueName: \"kubernetes.io/projected/13be012a-514b-4237-87da-79286f5977f2-kube-api-access-8sjsv\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.050242 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-catalog-content\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.050275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-utilities\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.073853 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-8sjsv\" (UniqueName: \"kubernetes.io/projected/13be012a-514b-4237-87da-79286f5977f2-kube-api-access-8sjsv\") pod \"redhat-marketplace-s8kw5\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.248156 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:16 crc kubenswrapper[4852]: I0129 11:30:16.706932 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s8kw5"] Jan 29 11:30:17 crc kubenswrapper[4852]: I0129 11:30:17.668747 4852 generic.go:334] "Generic (PLEG): container finished" podID="13be012a-514b-4237-87da-79286f5977f2" containerID="76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1" exitCode=0 Jan 29 11:30:17 crc kubenswrapper[4852]: I0129 11:30:17.668859 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s8kw5" event={"ID":"13be012a-514b-4237-87da-79286f5977f2","Type":"ContainerDied","Data":"76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1"} Jan 29 11:30:17 crc kubenswrapper[4852]: I0129 11:30:17.669090 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s8kw5" event={"ID":"13be012a-514b-4237-87da-79286f5977f2","Type":"ContainerStarted","Data":"bc2879213d289946332548f11e4124743f06b1413f2e47eafbc2b10edb4d56a2"} Jan 29 11:30:17 crc kubenswrapper[4852]: I0129 11:30:17.671427 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:30:19 crc kubenswrapper[4852]: I0129 11:30:19.685639 4852 generic.go:334] "Generic (PLEG): container finished" podID="13be012a-514b-4237-87da-79286f5977f2" containerID="0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a" exitCode=0 Jan 29 11:30:19 crc kubenswrapper[4852]: I0129 11:30:19.685869 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s8kw5" event={"ID":"13be012a-514b-4237-87da-79286f5977f2","Type":"ContainerDied","Data":"0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a"} Jan 29 11:30:20 crc kubenswrapper[4852]: I0129 11:30:20.695779 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s8kw5" event={"ID":"13be012a-514b-4237-87da-79286f5977f2","Type":"ContainerStarted","Data":"715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc"} Jan 29 11:30:20 crc kubenswrapper[4852]: I0129 11:30:20.727227 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s8kw5" podStartSLOduration=3.204409299 podStartE2EDuration="5.727204811s" podCreationTimestamp="2026-01-29 11:30:15 +0000 UTC" firstStartedPulling="2026-01-29 11:30:17.670919509 +0000 UTC m=+2914.888250663" lastFinishedPulling="2026-01-29 11:30:20.193715041 +0000 UTC m=+2917.411046175" observedRunningTime="2026-01-29 11:30:20.720665732 +0000 UTC m=+2917.937996906" watchObservedRunningTime="2026-01-29 11:30:20.727204811 +0000 UTC m=+2917.944535955" Jan 29 11:30:26 crc kubenswrapper[4852]: I0129 11:30:26.249253 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:26 crc kubenswrapper[4852]: I0129 11:30:26.249804 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:26 crc kubenswrapper[4852]: I0129 11:30:26.299217 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:26 crc kubenswrapper[4852]: I0129 11:30:26.819786 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:26 crc kubenswrapper[4852]: I0129 11:30:26.889058 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s8kw5"] Jan 29 11:30:28 crc kubenswrapper[4852]: I0129 11:30:28.757820 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s8kw5" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="registry-server" containerID="cri-o://715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc" gracePeriod=2 Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.156086 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.347171 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sjsv\" (UniqueName: \"kubernetes.io/projected/13be012a-514b-4237-87da-79286f5977f2-kube-api-access-8sjsv\") pod \"13be012a-514b-4237-87da-79286f5977f2\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.347777 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-utilities\") pod \"13be012a-514b-4237-87da-79286f5977f2\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.347808 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-catalog-content\") pod \"13be012a-514b-4237-87da-79286f5977f2\" (UID: \"13be012a-514b-4237-87da-79286f5977f2\") " Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.349555 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-utilities" (OuterVolumeSpecName: "utilities") pod "13be012a-514b-4237-87da-79286f5977f2" (UID: "13be012a-514b-4237-87da-79286f5977f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.356180 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13be012a-514b-4237-87da-79286f5977f2-kube-api-access-8sjsv" (OuterVolumeSpecName: "kube-api-access-8sjsv") pod "13be012a-514b-4237-87da-79286f5977f2" (UID: "13be012a-514b-4237-87da-79286f5977f2"). InnerVolumeSpecName "kube-api-access-8sjsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.450261 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sjsv\" (UniqueName: \"kubernetes.io/projected/13be012a-514b-4237-87da-79286f5977f2-kube-api-access-8sjsv\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.450295 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.606027 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13be012a-514b-4237-87da-79286f5977f2" (UID: "13be012a-514b-4237-87da-79286f5977f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.654362 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13be012a-514b-4237-87da-79286f5977f2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.768080 4852 generic.go:334] "Generic (PLEG): container finished" podID="13be012a-514b-4237-87da-79286f5977f2" containerID="715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc" exitCode=0 Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.768127 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s8kw5" event={"ID":"13be012a-514b-4237-87da-79286f5977f2","Type":"ContainerDied","Data":"715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc"} Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.768167 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s8kw5" event={"ID":"13be012a-514b-4237-87da-79286f5977f2","Type":"ContainerDied","Data":"bc2879213d289946332548f11e4124743f06b1413f2e47eafbc2b10edb4d56a2"} Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.768168 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s8kw5" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.768190 4852 scope.go:117] "RemoveContainer" containerID="715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.798296 4852 scope.go:117] "RemoveContainer" containerID="0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.808824 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s8kw5"] Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.822994 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s8kw5"] Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.824268 4852 scope.go:117] "RemoveContainer" containerID="76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.854712 4852 scope.go:117] "RemoveContainer" containerID="715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc" Jan 29 11:30:29 crc kubenswrapper[4852]: E0129 11:30:29.855387 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc\": container with ID starting with 715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc not found: ID does not exist" containerID="715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.855424 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc"} err="failed to get container status \"715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc\": rpc error: code = NotFound desc = could not find container \"715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc\": container with ID starting with 715382ce10467d15b1b74ba8930bebb513fccdd648b1fb3740556a50c241b9dc not found: ID does not exist" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.855446 4852 scope.go:117] "RemoveContainer" containerID="0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a" Jan 29 11:30:29 crc kubenswrapper[4852]: E0129 11:30:29.855831 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a\": container with ID starting with 0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a not found: ID does not exist" containerID="0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.855861 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a"} err="failed to get container status \"0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a\": rpc error: code = NotFound desc = could not find container \"0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a\": container with ID starting with 0835140a97ab52979b2724ed48869f226d73337840d033e31aacc8e3170b037a not found: ID does not exist" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.855880 4852 scope.go:117] "RemoveContainer" 
containerID="76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1" Jan 29 11:30:29 crc kubenswrapper[4852]: E0129 11:30:29.856259 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1\": container with ID starting with 76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1 not found: ID does not exist" containerID="76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1" Jan 29 11:30:29 crc kubenswrapper[4852]: I0129 11:30:29.856294 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1"} err="failed to get container status \"76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1\": rpc error: code = NotFound desc = could not find container \"76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1\": container with ID starting with 76d1a516c52a95ff0ab947baa48ba2f023fee2e253aa82e2eeefab3eef70deb1 not found: ID does not exist" Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.017571 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.017668 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.017721 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.018475 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.018568 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" gracePeriod=600 Jan 29 11:30:30 crc kubenswrapper[4852]: E0129 11:30:30.644289 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.782010 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" exitCode=0 Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.782100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64"} Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.782268 4852 scope.go:117] "RemoveContainer" containerID="1d59f6a3d6767b7ffed48c6db1d26df18e1816714b7968fe3262eacd233ba70f" Jan 29 11:30:30 crc kubenswrapper[4852]: I0129 11:30:30.782975 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:30:30 crc kubenswrapper[4852]: E0129 11:30:30.783329 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:30:31 crc kubenswrapper[4852]: I0129 11:30:31.477091 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13be012a-514b-4237-87da-79286f5977f2" path="/var/lib/kubelet/pods/13be012a-514b-4237-87da-79286f5977f2/volumes" Jan 29 11:30:41 crc kubenswrapper[4852]: I0129 11:30:41.464349 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:30:41 crc kubenswrapper[4852]: E0129 11:30:41.465154 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.633794 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g7sw8"] Jan 29 11:30:42 crc kubenswrapper[4852]: E0129 11:30:42.635190 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="registry-server" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.635204 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="registry-server" Jan 29 11:30:42 crc kubenswrapper[4852]: E0129 11:30:42.635232 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="extract-utilities" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.635238 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="extract-utilities" Jan 29 11:30:42 crc kubenswrapper[4852]: E0129 11:30:42.635251 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="extract-content" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.635257 4852 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="extract-content" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.635393 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="13be012a-514b-4237-87da-79286f5977f2" containerName="registry-server" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.639795 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.643177 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g7sw8"] Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.653901 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5vkb\" (UniqueName: \"kubernetes.io/projected/2ac02106-a4d1-4a7f-b576-0f00488ab044-kube-api-access-w5vkb\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.653949 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-utilities\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.654081 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-catalog-content\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.756360 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-catalog-content\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.756432 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5vkb\" (UniqueName: \"kubernetes.io/projected/2ac02106-a4d1-4a7f-b576-0f00488ab044-kube-api-access-w5vkb\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.756455 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-utilities\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.756882 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-utilities\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.757338 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-catalog-content\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.789956 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5vkb\" (UniqueName: \"kubernetes.io/projected/2ac02106-a4d1-4a7f-b576-0f00488ab044-kube-api-access-w5vkb\") pod \"community-operators-g7sw8\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:42 crc kubenswrapper[4852]: I0129 11:30:42.959728 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:43 crc kubenswrapper[4852]: I0129 11:30:43.511404 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g7sw8"] Jan 29 11:30:43 crc kubenswrapper[4852]: I0129 11:30:43.891851 4852 generic.go:334] "Generic (PLEG): container finished" podID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerID="98e3a30a8017dabca805feab98f8a921623e08497f7c9705d4637f50c55513cf" exitCode=0 Jan 29 11:30:43 crc kubenswrapper[4852]: I0129 11:30:43.891908 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7sw8" event={"ID":"2ac02106-a4d1-4a7f-b576-0f00488ab044","Type":"ContainerDied","Data":"98e3a30a8017dabca805feab98f8a921623e08497f7c9705d4637f50c55513cf"} Jan 29 11:30:43 crc kubenswrapper[4852]: I0129 11:30:43.891937 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7sw8" event={"ID":"2ac02106-a4d1-4a7f-b576-0f00488ab044","Type":"ContainerStarted","Data":"88a8de5ce00fd809102f9d5dc0f263474c3f78bf5998a030d44380606535701a"} Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.030927 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lnq2p"] Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.033140 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.048907 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lnq2p"] Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.095502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-catalog-content\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.095566 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hshk\" (UniqueName: \"kubernetes.io/projected/0e693bae-78c5-432b-86a4-abe43c1f7240-kube-api-access-9hshk\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.095730 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-utilities\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.197563 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-utilities\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.197791 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-catalog-content\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.197870 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hshk\" (UniqueName: \"kubernetes.io/projected/0e693bae-78c5-432b-86a4-abe43c1f7240-kube-api-access-9hshk\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.198083 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-utilities\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.198366 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-catalog-content\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.217447 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9hshk\" (UniqueName: \"kubernetes.io/projected/0e693bae-78c5-432b-86a4-abe43c1f7240-kube-api-access-9hshk\") pod \"certified-operators-lnq2p\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.356957 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.630734 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lnq2p"] Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.649093 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h887r"] Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.652727 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.665558 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h887r"] Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.707346 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-catalog-content\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.707409 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6v6t\" (UniqueName: \"kubernetes.io/projected/55dcee5b-9068-4248-aaa9-1f2516081b99-kube-api-access-w6v6t\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.707434 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-utilities\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.808537 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-catalog-content\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.808797 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6v6t\" (UniqueName: \"kubernetes.io/projected/55dcee5b-9068-4248-aaa9-1f2516081b99-kube-api-access-w6v6t\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.809150 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-utilities\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " 
pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.809063 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-catalog-content\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.809795 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-utilities\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.835543 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6v6t\" (UniqueName: \"kubernetes.io/projected/55dcee5b-9068-4248-aaa9-1f2516081b99-kube-api-access-w6v6t\") pod \"redhat-operators-h887r\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.918003 4852 generic.go:334] "Generic (PLEG): container finished" podID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerID="892aaa6c23584edcc57e2dc4a02dce14b9c29abbcda182bf55c65d3f445ab5ce" exitCode=0 Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.918096 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7sw8" event={"ID":"2ac02106-a4d1-4a7f-b576-0f00488ab044","Type":"ContainerDied","Data":"892aaa6c23584edcc57e2dc4a02dce14b9c29abbcda182bf55c65d3f445ab5ce"} Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.922194 4852 generic.go:334] "Generic (PLEG): container finished" podID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerID="284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6" exitCode=0 Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.922236 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerDied","Data":"284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6"} Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.922262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerStarted","Data":"43f30dd0d6b616ad7153bd217fc65b8cb5927cefd36e2bbdc8a5d27fe5baf0dd"} Jan 29 11:30:45 crc kubenswrapper[4852]: I0129 11:30:45.996138 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.427212 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h887r"] Jan 29 11:30:46 crc kubenswrapper[4852]: W0129 11:30:46.434062 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55dcee5b_9068_4248_aaa9_1f2516081b99.slice/crio-17d4b42923d6cde979d4719b1cfe49a9f0615516640c7eb82a37121c9e3ea8a7 WatchSource:0}: Error finding container 17d4b42923d6cde979d4719b1cfe49a9f0615516640c7eb82a37121c9e3ea8a7: Status 404 returned error can't find the container with id 17d4b42923d6cde979d4719b1cfe49a9f0615516640c7eb82a37121c9e3ea8a7 Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.930848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7sw8" event={"ID":"2ac02106-a4d1-4a7f-b576-0f00488ab044","Type":"ContainerStarted","Data":"458e2e08ebd0857cdc480656b0945fc2f912575e6b51a9226d5d79bd2d606eff"} Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.932874 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerStarted","Data":"9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d"} Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.934482 4852 generic.go:334] "Generic (PLEG): container finished" podID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerID="2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b" exitCode=0 Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.934518 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h887r" event={"ID":"55dcee5b-9068-4248-aaa9-1f2516081b99","Type":"ContainerDied","Data":"2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b"} Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.934538 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h887r" event={"ID":"55dcee5b-9068-4248-aaa9-1f2516081b99","Type":"ContainerStarted","Data":"17d4b42923d6cde979d4719b1cfe49a9f0615516640c7eb82a37121c9e3ea8a7"} Jan 29 11:30:46 crc kubenswrapper[4852]: I0129 11:30:46.953029 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g7sw8" podStartSLOduration=2.515827002 podStartE2EDuration="4.953007032s" podCreationTimestamp="2026-01-29 11:30:42 +0000 UTC" firstStartedPulling="2026-01-29 11:30:43.8946711 +0000 UTC m=+2941.112002234" lastFinishedPulling="2026-01-29 11:30:46.33185113 +0000 UTC m=+2943.549182264" observedRunningTime="2026-01-29 11:30:46.950322537 +0000 UTC m=+2944.167653671" watchObservedRunningTime="2026-01-29 11:30:46.953007032 +0000 UTC m=+2944.170338176" Jan 29 11:30:47 crc kubenswrapper[4852]: I0129 11:30:47.944842 4852 generic.go:334] "Generic (PLEG): container finished" podID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerID="9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d" exitCode=0 Jan 29 11:30:47 crc kubenswrapper[4852]: I0129 11:30:47.945810 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerDied","Data":"9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d"} Jan 29 11:30:48 
crc kubenswrapper[4852]: I0129 11:30:48.239948 4852 scope.go:117] "RemoveContainer" containerID="ada10b056cbe728387762aa69aeaa372c9890784752d9f9038f3259b272234ea" Jan 29 11:30:48 crc kubenswrapper[4852]: I0129 11:30:48.964901 4852 generic.go:334] "Generic (PLEG): container finished" podID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerID="ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd" exitCode=0 Jan 29 11:30:48 crc kubenswrapper[4852]: I0129 11:30:48.964955 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h887r" event={"ID":"55dcee5b-9068-4248-aaa9-1f2516081b99","Type":"ContainerDied","Data":"ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd"} Jan 29 11:30:49 crc kubenswrapper[4852]: I0129 11:30:49.977115 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerStarted","Data":"6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e"} Jan 29 11:30:49 crc kubenswrapper[4852]: I0129 11:30:49.980155 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h887r" event={"ID":"55dcee5b-9068-4248-aaa9-1f2516081b99","Type":"ContainerStarted","Data":"d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47"} Jan 29 11:30:50 crc kubenswrapper[4852]: I0129 11:30:50.004616 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lnq2p" podStartSLOduration=2.148284177 podStartE2EDuration="5.004565919s" podCreationTimestamp="2026-01-29 11:30:45 +0000 UTC" firstStartedPulling="2026-01-29 11:30:45.927486494 +0000 UTC m=+2943.144817628" lastFinishedPulling="2026-01-29 11:30:48.783768236 +0000 UTC m=+2946.001099370" observedRunningTime="2026-01-29 11:30:50.002069458 +0000 UTC m=+2947.219400602" watchObservedRunningTime="2026-01-29 11:30:50.004565919 +0000 UTC m=+2947.221897073" Jan 29 11:30:50 crc kubenswrapper[4852]: I0129 11:30:50.044741 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h887r" podStartSLOduration=2.5588835039999998 podStartE2EDuration="5.044719351s" podCreationTimestamp="2026-01-29 11:30:45 +0000 UTC" firstStartedPulling="2026-01-29 11:30:46.935503288 +0000 UTC m=+2944.152834422" lastFinishedPulling="2026-01-29 11:30:49.421339125 +0000 UTC m=+2946.638670269" observedRunningTime="2026-01-29 11:30:50.035702742 +0000 UTC m=+2947.253033896" watchObservedRunningTime="2026-01-29 11:30:50.044719351 +0000 UTC m=+2947.262050495" Jan 29 11:30:52 crc kubenswrapper[4852]: I0129 11:30:52.464041 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:30:52 crc kubenswrapper[4852]: E0129 11:30:52.464609 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:30:52 crc kubenswrapper[4852]: I0129 11:30:52.960377 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:52 crc kubenswrapper[4852]: 
I0129 11:30:52.960441 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:53 crc kubenswrapper[4852]: I0129 11:30:53.004016 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:53 crc kubenswrapper[4852]: I0129 11:30:53.054882 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:54 crc kubenswrapper[4852]: I0129 11:30:54.626969 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g7sw8"] Jan 29 11:30:55 crc kubenswrapper[4852]: I0129 11:30:55.016692 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g7sw8" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="registry-server" containerID="cri-o://458e2e08ebd0857cdc480656b0945fc2f912575e6b51a9226d5d79bd2d606eff" gracePeriod=2 Jan 29 11:30:55 crc kubenswrapper[4852]: I0129 11:30:55.358007 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:55 crc kubenswrapper[4852]: I0129 11:30:55.358061 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:55 crc kubenswrapper[4852]: I0129 11:30:55.401672 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:55 crc kubenswrapper[4852]: I0129 11:30:55.997245 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:55 crc kubenswrapper[4852]: I0129 11:30:55.997295 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:30:56 crc kubenswrapper[4852]: I0129 11:30:56.070547 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:57 crc kubenswrapper[4852]: I0129 11:30:57.042967 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h887r" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="registry-server" probeResult="failure" output=< Jan 29 11:30:57 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 11:30:57 crc kubenswrapper[4852]: > Jan 29 11:30:57 crc kubenswrapper[4852]: I0129 11:30:57.826093 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lnq2p"] Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.041095 4852 generic.go:334] "Generic (PLEG): container finished" podID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerID="458e2e08ebd0857cdc480656b0945fc2f912575e6b51a9226d5d79bd2d606eff" exitCode=0 Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.041168 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7sw8" event={"ID":"2ac02106-a4d1-4a7f-b576-0f00488ab044","Type":"ContainerDied","Data":"458e2e08ebd0857cdc480656b0945fc2f912575e6b51a9226d5d79bd2d606eff"} Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.041320 4852 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-lnq2p" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="registry-server" containerID="cri-o://6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e" gracePeriod=2 Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.242309 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.378702 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-utilities\") pod \"2ac02106-a4d1-4a7f-b576-0f00488ab044\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.379059 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-catalog-content\") pod \"2ac02106-a4d1-4a7f-b576-0f00488ab044\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.379110 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5vkb\" (UniqueName: \"kubernetes.io/projected/2ac02106-a4d1-4a7f-b576-0f00488ab044-kube-api-access-w5vkb\") pod \"2ac02106-a4d1-4a7f-b576-0f00488ab044\" (UID: \"2ac02106-a4d1-4a7f-b576-0f00488ab044\") " Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.380660 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-utilities" (OuterVolumeSpecName: "utilities") pod "2ac02106-a4d1-4a7f-b576-0f00488ab044" (UID: "2ac02106-a4d1-4a7f-b576-0f00488ab044"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.393309 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ac02106-a4d1-4a7f-b576-0f00488ab044-kube-api-access-w5vkb" (OuterVolumeSpecName: "kube-api-access-w5vkb") pod "2ac02106-a4d1-4a7f-b576-0f00488ab044" (UID: "2ac02106-a4d1-4a7f-b576-0f00488ab044"). InnerVolumeSpecName "kube-api-access-w5vkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.432125 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2ac02106-a4d1-4a7f-b576-0f00488ab044" (UID: "2ac02106-a4d1-4a7f-b576-0f00488ab044"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.432510 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.482802 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.482839 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ac02106-a4d1-4a7f-b576-0f00488ab044-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.482848 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5vkb\" (UniqueName: \"kubernetes.io/projected/2ac02106-a4d1-4a7f-b576-0f00488ab044-kube-api-access-w5vkb\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.584192 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hshk\" (UniqueName: \"kubernetes.io/projected/0e693bae-78c5-432b-86a4-abe43c1f7240-kube-api-access-9hshk\") pod \"0e693bae-78c5-432b-86a4-abe43c1f7240\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.584482 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-catalog-content\") pod \"0e693bae-78c5-432b-86a4-abe43c1f7240\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.584570 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-utilities\") pod \"0e693bae-78c5-432b-86a4-abe43c1f7240\" (UID: \"0e693bae-78c5-432b-86a4-abe43c1f7240\") " Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.586738 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-utilities" (OuterVolumeSpecName: "utilities") pod "0e693bae-78c5-432b-86a4-abe43c1f7240" (UID: "0e693bae-78c5-432b-86a4-abe43c1f7240"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.588863 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e693bae-78c5-432b-86a4-abe43c1f7240-kube-api-access-9hshk" (OuterVolumeSpecName: "kube-api-access-9hshk") pod "0e693bae-78c5-432b-86a4-abe43c1f7240" (UID: "0e693bae-78c5-432b-86a4-abe43c1f7240"). InnerVolumeSpecName "kube-api-access-9hshk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.647137 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e693bae-78c5-432b-86a4-abe43c1f7240" (UID: "0e693bae-78c5-432b-86a4-abe43c1f7240"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.686508 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.686547 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e693bae-78c5-432b-86a4-abe43c1f7240-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:58 crc kubenswrapper[4852]: I0129 11:30:58.686561 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hshk\" (UniqueName: \"kubernetes.io/projected/0e693bae-78c5-432b-86a4-abe43c1f7240-kube-api-access-9hshk\") on node \"crc\" DevicePath \"\"" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.051740 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7sw8" event={"ID":"2ac02106-a4d1-4a7f-b576-0f00488ab044","Type":"ContainerDied","Data":"88a8de5ce00fd809102f9d5dc0f263474c3f78bf5998a030d44380606535701a"} Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.051798 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7sw8" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.051802 4852 scope.go:117] "RemoveContainer" containerID="458e2e08ebd0857cdc480656b0945fc2f912575e6b51a9226d5d79bd2d606eff" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.055384 4852 generic.go:334] "Generic (PLEG): container finished" podID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerID="6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e" exitCode=0 Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.055421 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerDied","Data":"6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e"} Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.055443 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnq2p" event={"ID":"0e693bae-78c5-432b-86a4-abe43c1f7240","Type":"ContainerDied","Data":"43f30dd0d6b616ad7153bd217fc65b8cb5927cefd36e2bbdc8a5d27fe5baf0dd"} Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.055498 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnq2p" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.076709 4852 scope.go:117] "RemoveContainer" containerID="892aaa6c23584edcc57e2dc4a02dce14b9c29abbcda182bf55c65d3f445ab5ce" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.088995 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lnq2p"] Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.099134 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lnq2p"] Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.108005 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g7sw8"] Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.113382 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g7sw8"] Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.118301 4852 scope.go:117] "RemoveContainer" containerID="98e3a30a8017dabca805feab98f8a921623e08497f7c9705d4637f50c55513cf" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.135213 4852 scope.go:117] "RemoveContainer" containerID="6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.182937 4852 scope.go:117] "RemoveContainer" containerID="9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.201520 4852 scope.go:117] "RemoveContainer" containerID="284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.225851 4852 scope.go:117] "RemoveContainer" containerID="6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e" Jan 29 11:30:59 crc kubenswrapper[4852]: E0129 11:30:59.226348 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e\": container with ID starting with 6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e not found: ID does not exist" containerID="6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.226474 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e"} err="failed to get container status \"6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e\": rpc error: code = NotFound desc = could not find container \"6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e\": container with ID starting with 6df65d02019cd5a505b8ca1f107676625913c17e1e6b23cfc1a5ac7a04066e2e not found: ID does not exist" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.226728 4852 scope.go:117] "RemoveContainer" containerID="9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d" Jan 29 11:30:59 crc kubenswrapper[4852]: E0129 11:30:59.227189 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d\": container with ID starting with 9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d not found: ID does not exist" containerID="9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d" Jan 29 
11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.227229 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d"} err="failed to get container status \"9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d\": rpc error: code = NotFound desc = could not find container \"9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d\": container with ID starting with 9ba80097aa7db19c97b854c6f300b471a088329e217966f455002a20026b096d not found: ID does not exist" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.227255 4852 scope.go:117] "RemoveContainer" containerID="284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6" Jan 29 11:30:59 crc kubenswrapper[4852]: E0129 11:30:59.227614 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6\": container with ID starting with 284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6 not found: ID does not exist" containerID="284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.227644 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6"} err="failed to get container status \"284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6\": rpc error: code = NotFound desc = could not find container \"284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6\": container with ID starting with 284fa9a04d2b99c44a503e1c5c801c2042901da22e961ae8dfa12b8026d40af6 not found: ID does not exist" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.472549 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" path="/var/lib/kubelet/pods/0e693bae-78c5-432b-86a4-abe43c1f7240/volumes" Jan 29 11:30:59 crc kubenswrapper[4852]: I0129 11:30:59.473206 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" path="/var/lib/kubelet/pods/2ac02106-a4d1-4a7f-b576-0f00488ab044/volumes" Jan 29 11:31:05 crc kubenswrapper[4852]: I0129 11:31:05.463741 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:31:05 crc kubenswrapper[4852]: E0129 11:31:05.464219 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:31:06 crc kubenswrapper[4852]: I0129 11:31:06.041555 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:31:06 crc kubenswrapper[4852]: I0129 11:31:06.080789 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:31:06 crc kubenswrapper[4852]: I0129 11:31:06.271393 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h887r"] Jan 29 
11:31:07 crc kubenswrapper[4852]: I0129 11:31:07.139978 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h887r" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="registry-server" containerID="cri-o://d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47" gracePeriod=2 Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.079206 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.159478 4852 generic.go:334] "Generic (PLEG): container finished" podID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerID="d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47" exitCode=0 Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.159532 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h887r" event={"ID":"55dcee5b-9068-4248-aaa9-1f2516081b99","Type":"ContainerDied","Data":"d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47"} Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.159563 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h887r" event={"ID":"55dcee5b-9068-4248-aaa9-1f2516081b99","Type":"ContainerDied","Data":"17d4b42923d6cde979d4719b1cfe49a9f0615516640c7eb82a37121c9e3ea8a7"} Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.159615 4852 scope.go:117] "RemoveContainer" containerID="d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.159763 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h887r" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.182305 4852 scope.go:117] "RemoveContainer" containerID="ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.205275 4852 scope.go:117] "RemoveContainer" containerID="2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.227399 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-catalog-content\") pod \"55dcee5b-9068-4248-aaa9-1f2516081b99\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.227479 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-utilities\") pod \"55dcee5b-9068-4248-aaa9-1f2516081b99\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.227565 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6v6t\" (UniqueName: \"kubernetes.io/projected/55dcee5b-9068-4248-aaa9-1f2516081b99-kube-api-access-w6v6t\") pod \"55dcee5b-9068-4248-aaa9-1f2516081b99\" (UID: \"55dcee5b-9068-4248-aaa9-1f2516081b99\") " Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.228303 4852 scope.go:117] "RemoveContainer" containerID="d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.228421 4852 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-utilities" (OuterVolumeSpecName: "utilities") pod "55dcee5b-9068-4248-aaa9-1f2516081b99" (UID: "55dcee5b-9068-4248-aaa9-1f2516081b99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:31:08 crc kubenswrapper[4852]: E0129 11:31:08.228849 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47\": container with ID starting with d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47 not found: ID does not exist" containerID="d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.228909 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47"} err="failed to get container status \"d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47\": rpc error: code = NotFound desc = could not find container \"d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47\": container with ID starting with d130daaeef1c004f75981abe6306b4ec0714f7202e22b8c6b5653c477e66bb47 not found: ID does not exist" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.228940 4852 scope.go:117] "RemoveContainer" containerID="ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd" Jan 29 11:31:08 crc kubenswrapper[4852]: E0129 11:31:08.229883 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd\": container with ID starting with ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd not found: ID does not exist" containerID="ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.229935 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd"} err="failed to get container status \"ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd\": rpc error: code = NotFound desc = could not find container \"ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd\": container with ID starting with ab2663015274bdd2d27b7c8199751ab7471161388bf6b62dde172be17a5255cd not found: ID does not exist" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.229965 4852 scope.go:117] "RemoveContainer" containerID="2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b" Jan 29 11:31:08 crc kubenswrapper[4852]: E0129 11:31:08.230387 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b\": container with ID starting with 2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b not found: ID does not exist" containerID="2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.230413 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b"} err="failed to get container status 
\"2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b\": rpc error: code = NotFound desc = could not find container \"2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b\": container with ID starting with 2ee0513144e897164da2c181916c4c01261e6aa1c037c8d6b09bb7da350c650b not found: ID does not exist" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.234199 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55dcee5b-9068-4248-aaa9-1f2516081b99-kube-api-access-w6v6t" (OuterVolumeSpecName: "kube-api-access-w6v6t") pod "55dcee5b-9068-4248-aaa9-1f2516081b99" (UID: "55dcee5b-9068-4248-aaa9-1f2516081b99"). InnerVolumeSpecName "kube-api-access-w6v6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.329222 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.329263 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6v6t\" (UniqueName: \"kubernetes.io/projected/55dcee5b-9068-4248-aaa9-1f2516081b99-kube-api-access-w6v6t\") on node \"crc\" DevicePath \"\"" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.357763 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55dcee5b-9068-4248-aaa9-1f2516081b99" (UID: "55dcee5b-9068-4248-aaa9-1f2516081b99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.430458 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55dcee5b-9068-4248-aaa9-1f2516081b99-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.496455 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h887r"] Jan 29 11:31:08 crc kubenswrapper[4852]: I0129 11:31:08.501569 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h887r"] Jan 29 11:31:09 crc kubenswrapper[4852]: I0129 11:31:09.476876 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" path="/var/lib/kubelet/pods/55dcee5b-9068-4248-aaa9-1f2516081b99/volumes" Jan 29 11:31:19 crc kubenswrapper[4852]: I0129 11:31:19.463995 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:31:19 crc kubenswrapper[4852]: E0129 11:31:19.464645 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:31:30 crc kubenswrapper[4852]: I0129 11:31:30.465023 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:31:30 crc kubenswrapper[4852]: E0129 11:31:30.466820 4852 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:31:44 crc kubenswrapper[4852]: I0129 11:31:44.463804 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:31:44 crc kubenswrapper[4852]: E0129 11:31:44.464700 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:31:58 crc kubenswrapper[4852]: I0129 11:31:58.464033 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:31:58 crc kubenswrapper[4852]: E0129 11:31:58.465225 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:32:13 crc kubenswrapper[4852]: I0129 11:32:13.466937 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:32:13 crc kubenswrapper[4852]: E0129 11:32:13.467522 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:32:26 crc kubenswrapper[4852]: I0129 11:32:26.463723 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:32:26 crc kubenswrapper[4852]: E0129 11:32:26.464785 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:32:40 crc kubenswrapper[4852]: I0129 11:32:40.464466 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:32:40 crc kubenswrapper[4852]: E0129 11:32:40.465399 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:32:51 crc kubenswrapper[4852]: I0129 11:32:51.464036 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:32:51 crc kubenswrapper[4852]: E0129 11:32:51.464874 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:33:03 crc kubenswrapper[4852]: I0129 11:33:03.469460 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:33:03 crc kubenswrapper[4852]: E0129 11:33:03.470236 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:33:16 crc kubenswrapper[4852]: I0129 11:33:16.463561 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:33:16 crc kubenswrapper[4852]: E0129 11:33:16.464443 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:33:29 crc kubenswrapper[4852]: I0129 11:33:29.464059 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:33:29 crc kubenswrapper[4852]: E0129 11:33:29.464875 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:33:40 crc kubenswrapper[4852]: I0129 11:33:40.463774 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:33:40 crc kubenswrapper[4852]: E0129 11:33:40.464743 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:33:51 crc kubenswrapper[4852]: I0129 11:33:51.463904 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:33:51 crc kubenswrapper[4852]: E0129 11:33:51.464700 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:34:05 crc kubenswrapper[4852]: I0129 11:34:05.463849 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:34:05 crc kubenswrapper[4852]: E0129 11:34:05.464551 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:34:16 crc kubenswrapper[4852]: I0129 11:34:16.464281 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:34:16 crc kubenswrapper[4852]: E0129 11:34:16.465160 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:34:31 crc kubenswrapper[4852]: I0129 11:34:31.463595 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:34:31 crc kubenswrapper[4852]: E0129 11:34:31.464317 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:34:42 crc kubenswrapper[4852]: I0129 11:34:42.463476 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:34:42 crc kubenswrapper[4852]: E0129 11:34:42.464034 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:34:57 crc kubenswrapper[4852]: I0129 11:34:57.463191 4852 
scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:34:57 crc kubenswrapper[4852]: E0129 11:34:57.464122 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:35:11 crc kubenswrapper[4852]: I0129 11:35:11.464389 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:35:11 crc kubenswrapper[4852]: E0129 11:35:11.465122 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:35:26 crc kubenswrapper[4852]: I0129 11:35:26.463575 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:35:26 crc kubenswrapper[4852]: E0129 11:35:26.464634 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:35:40 crc kubenswrapper[4852]: I0129 11:35:40.463958 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:35:41 crc kubenswrapper[4852]: I0129 11:35:41.262152 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"79fbfeb75f3b45b1f47110cbbb4f15a4c98420d1649d08ab55e5c59582aa8ced"} Jan 29 11:38:00 crc kubenswrapper[4852]: I0129 11:38:00.017712 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:38:00 crc kubenswrapper[4852]: I0129 11:38:00.018938 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:38:30 crc kubenswrapper[4852]: I0129 11:38:30.017230 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 29 11:38:30 crc kubenswrapper[4852]: I0129 11:38:30.018916 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.016845 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.017312 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.017354 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.017895 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79fbfeb75f3b45b1f47110cbbb4f15a4c98420d1649d08ab55e5c59582aa8ced"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.017946 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://79fbfeb75f3b45b1f47110cbbb4f15a4c98420d1649d08ab55e5c59582aa8ced" gracePeriod=600 Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.942553 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="79fbfeb75f3b45b1f47110cbbb4f15a4c98420d1649d08ab55e5c59582aa8ced" exitCode=0 Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.942612 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"79fbfeb75f3b45b1f47110cbbb4f15a4c98420d1649d08ab55e5c59582aa8ced"} Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.943161 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd"} Jan 29 11:39:00 crc kubenswrapper[4852]: I0129 11:39:00.943188 4852 scope.go:117] "RemoveContainer" containerID="41e74b03319599169a6c68c43019b2aa133432d5c94e790c19829a9ae379cd64" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.017127 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.017883 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.392663 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7whr7"] Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393049 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="extract-utilities" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393068 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="extract-utilities" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393080 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393088 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393104 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="extract-content" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393112 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="extract-content" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393128 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393135 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393146 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393152 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393166 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="extract-utilities" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393172 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="extract-utilities" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393194 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="extract-utilities" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393201 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="extract-utilities" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393217 4852 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="extract-content" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393224 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="extract-content" Jan 29 11:41:00 crc kubenswrapper[4852]: E0129 11:41:00.393237 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="extract-content" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393246 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="extract-content" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393430 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac02106-a4d1-4a7f-b576-0f00488ab044" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393441 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e693bae-78c5-432b-86a4-abe43c1f7240" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.393451 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="55dcee5b-9068-4248-aaa9-1f2516081b99" containerName="registry-server" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.394740 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.406523 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7whr7"] Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.564175 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxvcl\" (UniqueName: \"kubernetes.io/projected/e4aee05a-0fb2-4d84-986a-832a0c439838-kube-api-access-lxvcl\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.564550 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-utilities\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.564640 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-catalog-content\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.666100 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-utilities\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.666154 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-catalog-content\") pod 
\"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.666252 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxvcl\" (UniqueName: \"kubernetes.io/projected/e4aee05a-0fb2-4d84-986a-832a0c439838-kube-api-access-lxvcl\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.667095 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-utilities\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.667118 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-catalog-content\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.693046 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxvcl\" (UniqueName: \"kubernetes.io/projected/e4aee05a-0fb2-4d84-986a-832a0c439838-kube-api-access-lxvcl\") pod \"redhat-operators-7whr7\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:00 crc kubenswrapper[4852]: I0129 11:41:00.722606 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:01 crc kubenswrapper[4852]: I0129 11:41:01.171094 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7whr7"] Jan 29 11:41:01 crc kubenswrapper[4852]: I0129 11:41:01.843561 4852 generic.go:334] "Generic (PLEG): container finished" podID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerID="3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1" exitCode=0 Jan 29 11:41:01 crc kubenswrapper[4852]: I0129 11:41:01.843732 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerDied","Data":"3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1"} Jan 29 11:41:01 crc kubenswrapper[4852]: I0129 11:41:01.844213 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerStarted","Data":"f94829baa7c06371132efcca55d989f17c7cb7166f29f6d719a2c968861daca3"} Jan 29 11:41:01 crc kubenswrapper[4852]: I0129 11:41:01.845574 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:41:02 crc kubenswrapper[4852]: I0129 11:41:02.853193 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerStarted","Data":"71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a"} Jan 29 11:41:03 crc kubenswrapper[4852]: I0129 11:41:03.861867 4852 generic.go:334] "Generic (PLEG): container finished" podID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerID="71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a" exitCode=0 Jan 29 11:41:03 crc kubenswrapper[4852]: I0129 11:41:03.861925 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerDied","Data":"71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a"} Jan 29 11:41:04 crc kubenswrapper[4852]: I0129 11:41:04.873240 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerStarted","Data":"0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b"} Jan 29 11:41:10 crc kubenswrapper[4852]: I0129 11:41:10.723648 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:10 crc kubenswrapper[4852]: I0129 11:41:10.724315 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.196825 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7whr7" podStartSLOduration=8.728820152 podStartE2EDuration="11.196801232s" podCreationTimestamp="2026-01-29 11:41:00 +0000 UTC" firstStartedPulling="2026-01-29 11:41:01.845294965 +0000 UTC m=+3559.062626099" lastFinishedPulling="2026-01-29 11:41:04.313276035 +0000 UTC m=+3561.530607179" observedRunningTime="2026-01-29 11:41:04.895807951 +0000 UTC m=+3562.113139095" watchObservedRunningTime="2026-01-29 11:41:11.196801232 +0000 UTC m=+3568.414132376" Jan 29 11:41:11 crc 
kubenswrapper[4852]: I0129 11:41:11.197918 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gl9kx"] Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.199679 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.212108 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl9kx"] Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.326947 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-utilities\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.327026 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkbpr\" (UniqueName: \"kubernetes.io/projected/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-kube-api-access-rkbpr\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.327052 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-catalog-content\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.428965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-utilities\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.429094 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkbpr\" (UniqueName: \"kubernetes.io/projected/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-kube-api-access-rkbpr\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.429149 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-catalog-content\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.429578 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-utilities\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.429705 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-catalog-content\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.448161 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkbpr\" (UniqueName: \"kubernetes.io/projected/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-kube-api-access-rkbpr\") pod \"redhat-marketplace-gl9kx\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.518375 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.770697 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7whr7" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="registry-server" probeResult="failure" output=< Jan 29 11:41:11 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 11:41:11 crc kubenswrapper[4852]: > Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.775626 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl9kx"] Jan 29 11:41:11 crc kubenswrapper[4852]: I0129 11:41:11.940468 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl9kx" event={"ID":"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3","Type":"ContainerStarted","Data":"0961e86065277a1a33188114db707082d00ae68d9c4fa7f05eab970276611147"} Jan 29 11:41:12 crc kubenswrapper[4852]: I0129 11:41:12.952004 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerID="d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d" exitCode=0 Jan 29 11:41:12 crc kubenswrapper[4852]: I0129 11:41:12.952212 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl9kx" event={"ID":"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3","Type":"ContainerDied","Data":"d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d"} Jan 29 11:41:15 crc kubenswrapper[4852]: I0129 11:41:15.974334 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerID="38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137" exitCode=0 Jan 29 11:41:15 crc kubenswrapper[4852]: I0129 11:41:15.974653 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl9kx" event={"ID":"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3","Type":"ContainerDied","Data":"38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137"} Jan 29 11:41:16 crc kubenswrapper[4852]: I0129 11:41:16.984186 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl9kx" event={"ID":"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3","Type":"ContainerStarted","Data":"36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59"} Jan 29 11:41:17 crc kubenswrapper[4852]: I0129 11:41:17.004052 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gl9kx" podStartSLOduration=2.603430386 podStartE2EDuration="6.004033704s" podCreationTimestamp="2026-01-29 11:41:11 +0000 UTC" firstStartedPulling="2026-01-29 11:41:12.954475838 
+0000 UTC m=+3570.171806982" lastFinishedPulling="2026-01-29 11:41:16.355079176 +0000 UTC m=+3573.572410300" observedRunningTime="2026-01-29 11:41:16.998778756 +0000 UTC m=+3574.216109900" watchObservedRunningTime="2026-01-29 11:41:17.004033704 +0000 UTC m=+3574.221364838" Jan 29 11:41:20 crc kubenswrapper[4852]: I0129 11:41:20.769641 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:20 crc kubenswrapper[4852]: I0129 11:41:20.843104 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:21 crc kubenswrapper[4852]: I0129 11:41:21.017575 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7whr7"] Jan 29 11:41:21 crc kubenswrapper[4852]: I0129 11:41:21.519503 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:21 crc kubenswrapper[4852]: I0129 11:41:21.519624 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:21 crc kubenswrapper[4852]: I0129 11:41:21.569076 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.031712 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7whr7" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="registry-server" containerID="cri-o://0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b" gracePeriod=2 Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.087805 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.459498 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.603982 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxvcl\" (UniqueName: \"kubernetes.io/projected/e4aee05a-0fb2-4d84-986a-832a0c439838-kube-api-access-lxvcl\") pod \"e4aee05a-0fb2-4d84-986a-832a0c439838\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.604096 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-utilities\") pod \"e4aee05a-0fb2-4d84-986a-832a0c439838\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.604180 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-catalog-content\") pod \"e4aee05a-0fb2-4d84-986a-832a0c439838\" (UID: \"e4aee05a-0fb2-4d84-986a-832a0c439838\") " Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.607378 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-utilities" (OuterVolumeSpecName: "utilities") pod "e4aee05a-0fb2-4d84-986a-832a0c439838" (UID: "e4aee05a-0fb2-4d84-986a-832a0c439838"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.614875 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4aee05a-0fb2-4d84-986a-832a0c439838-kube-api-access-lxvcl" (OuterVolumeSpecName: "kube-api-access-lxvcl") pod "e4aee05a-0fb2-4d84-986a-832a0c439838" (UID: "e4aee05a-0fb2-4d84-986a-832a0c439838"). InnerVolumeSpecName "kube-api-access-lxvcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.706514 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxvcl\" (UniqueName: \"kubernetes.io/projected/e4aee05a-0fb2-4d84-986a-832a0c439838-kube-api-access-lxvcl\") on node \"crc\" DevicePath \"\"" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.706570 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.732247 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e4aee05a-0fb2-4d84-986a-832a0c439838" (UID: "e4aee05a-0fb2-4d84-986a-832a0c439838"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:41:22 crc kubenswrapper[4852]: I0129 11:41:22.808356 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4aee05a-0fb2-4d84-986a-832a0c439838-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.048420 4852 generic.go:334] "Generic (PLEG): container finished" podID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerID="0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b" exitCode=0 Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.048523 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerDied","Data":"0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b"} Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.048571 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7whr7" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.048673 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7whr7" event={"ID":"e4aee05a-0fb2-4d84-986a-832a0c439838","Type":"ContainerDied","Data":"f94829baa7c06371132efcca55d989f17c7cb7166f29f6d719a2c968861daca3"} Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.048719 4852 scope.go:117] "RemoveContainer" containerID="0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.091925 4852 scope.go:117] "RemoveContainer" containerID="71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.095484 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7whr7"] Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.102634 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7whr7"] Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.117425 4852 scope.go:117] "RemoveContainer" containerID="3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.134332 4852 scope.go:117] "RemoveContainer" containerID="0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b" Jan 29 11:41:23 crc kubenswrapper[4852]: E0129 11:41:23.134844 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b\": container with ID starting with 0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b not found: ID does not exist" containerID="0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.134907 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b"} err="failed to get container status \"0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b\": rpc error: code = NotFound desc = could not find container \"0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b\": container with ID starting with 0a9db37fdcfbe266a30290171f4cd7cdf19056e5e9ee466d4b801b269036dd6b not found: ID does not exist" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.134951 4852 scope.go:117] "RemoveContainer" containerID="71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a" Jan 29 11:41:23 crc kubenswrapper[4852]: E0129 11:41:23.135368 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a\": container with ID starting with 71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a not found: ID does not exist" containerID="71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.135424 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a"} err="failed to get container status \"71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a\": rpc error: code = NotFound desc = could not find container 
\"71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a\": container with ID starting with 71796fe35e0415378010bb565defaca8355731d3aa946a4d4046c1a0a62b079a not found: ID does not exist" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.135464 4852 scope.go:117] "RemoveContainer" containerID="3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1" Jan 29 11:41:23 crc kubenswrapper[4852]: E0129 11:41:23.135904 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1\": container with ID starting with 3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1 not found: ID does not exist" containerID="3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.135941 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1"} err="failed to get container status \"3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1\": rpc error: code = NotFound desc = could not find container \"3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1\": container with ID starting with 3b5c23557f846f3b1ccbe6e92e19c61bf86966002177971252490346c64faab1 not found: ID does not exist" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.476779 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" path="/var/lib/kubelet/pods/e4aee05a-0fb2-4d84-986a-832a0c439838/volumes" Jan 29 11:41:23 crc kubenswrapper[4852]: I0129 11:41:23.815066 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl9kx"] Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.060025 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gl9kx" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="registry-server" containerID="cri-o://36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59" gracePeriod=2 Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.444276 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.534263 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkbpr\" (UniqueName: \"kubernetes.io/projected/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-kube-api-access-rkbpr\") pod \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.534412 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-catalog-content\") pod \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.534467 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-utilities\") pod \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\" (UID: \"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3\") " Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.535418 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-utilities" (OuterVolumeSpecName: "utilities") pod "8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" (UID: "8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.537652 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-kube-api-access-rkbpr" (OuterVolumeSpecName: "kube-api-access-rkbpr") pod "8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" (UID: "8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3"). InnerVolumeSpecName "kube-api-access-rkbpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.567455 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" (UID: "8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.636425 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkbpr\" (UniqueName: \"kubernetes.io/projected/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-kube-api-access-rkbpr\") on node \"crc\" DevicePath \"\"" Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.636468 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:41:24 crc kubenswrapper[4852]: I0129 11:41:24.636483 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.072902 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerID="36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59" exitCode=0 Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.073050 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl9kx" event={"ID":"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3","Type":"ContainerDied","Data":"36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59"} Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.073266 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gl9kx" event={"ID":"8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3","Type":"ContainerDied","Data":"0961e86065277a1a33188114db707082d00ae68d9c4fa7f05eab970276611147"} Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.073291 4852 scope.go:117] "RemoveContainer" containerID="36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.073137 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gl9kx" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.093813 4852 scope.go:117] "RemoveContainer" containerID="38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.112945 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl9kx"] Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.119030 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gl9kx"] Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.141471 4852 scope.go:117] "RemoveContainer" containerID="d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.162555 4852 scope.go:117] "RemoveContainer" containerID="36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59" Jan 29 11:41:25 crc kubenswrapper[4852]: E0129 11:41:25.162864 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59\": container with ID starting with 36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59 not found: ID does not exist" containerID="36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.162910 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59"} err="failed to get container status \"36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59\": rpc error: code = NotFound desc = could not find container \"36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59\": container with ID starting with 36f12bdcb0f565c149571b7015a617f8779dc40657051b5e39f1f94c3a1a7c59 not found: ID does not exist" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.162939 4852 scope.go:117] "RemoveContainer" containerID="38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137" Jan 29 11:41:25 crc kubenswrapper[4852]: E0129 11:41:25.163527 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137\": container with ID starting with 38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137 not found: ID does not exist" containerID="38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.163559 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137"} err="failed to get container status \"38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137\": rpc error: code = NotFound desc = could not find container \"38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137\": container with ID starting with 38e20ccd21a70cb5cb42400523e519ddfe89fe5e58b985dadd98a7cd1e147137 not found: ID does not exist" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.163591 4852 scope.go:117] "RemoveContainer" containerID="d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d" Jan 29 11:41:25 crc kubenswrapper[4852]: E0129 11:41:25.163882 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d\": container with ID starting with d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d not found: ID does not exist" containerID="d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.163928 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d"} err="failed to get container status \"d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d\": rpc error: code = NotFound desc = could not find container \"d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d\": container with ID starting with d022b599a73631d87b7a3e8d80bf771d6a3edd09face42ac378d5ba85e468b7d not found: ID does not exist" Jan 29 11:41:25 crc kubenswrapper[4852]: I0129 11:41:25.474517 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" path="/var/lib/kubelet/pods/8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3/volumes" Jan 29 11:41:30 crc kubenswrapper[4852]: I0129 11:41:30.017539 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:41:30 crc kubenswrapper[4852]: I0129 11:41:30.017976 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.156441 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vz65p"] Jan 29 11:41:50 crc kubenswrapper[4852]: E0129 11:41:50.157479 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="registry-server" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157501 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="registry-server" Jan 29 11:41:50 crc kubenswrapper[4852]: E0129 11:41:50.157520 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="extract-content" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157532 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="extract-content" Jan 29 11:41:50 crc kubenswrapper[4852]: E0129 11:41:50.157556 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="extract-utilities" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157569 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="extract-utilities" Jan 29 11:41:50 crc kubenswrapper[4852]: E0129 11:41:50.157620 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="extract-utilities" Jan 29 11:41:50 crc kubenswrapper[4852]: 
I0129 11:41:50.157634 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="extract-utilities" Jan 29 11:41:50 crc kubenswrapper[4852]: E0129 11:41:50.157654 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="extract-content" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157666 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="extract-content" Jan 29 11:41:50 crc kubenswrapper[4852]: E0129 11:41:50.157688 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="registry-server" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157700 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="registry-server" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157935 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f6317d4-3f27-4161-9dbf-d2cfcd63f6a3" containerName="registry-server" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.157960 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4aee05a-0fb2-4d84-986a-832a0c439838" containerName="registry-server" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.159691 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.176974 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vz65p"] Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.328124 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-catalog-content\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.328216 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hsgv\" (UniqueName: \"kubernetes.io/projected/09ada291-491b-4777-a990-e772c195d2ee-kube-api-access-6hsgv\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.328285 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-utilities\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.429863 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-utilities\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.430232 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-catalog-content\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.430291 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hsgv\" (UniqueName: \"kubernetes.io/projected/09ada291-491b-4777-a990-e772c195d2ee-kube-api-access-6hsgv\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.430455 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-utilities\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.430713 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-catalog-content\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.452890 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hsgv\" (UniqueName: \"kubernetes.io/projected/09ada291-491b-4777-a990-e772c195d2ee-kube-api-access-6hsgv\") pod \"certified-operators-vz65p\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.487440 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:41:50 crc kubenswrapper[4852]: I0129 11:41:50.764949 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vz65p"] Jan 29 11:41:51 crc kubenswrapper[4852]: I0129 11:41:51.305652 4852 generic.go:334] "Generic (PLEG): container finished" podID="09ada291-491b-4777-a990-e772c195d2ee" containerID="6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444" exitCode=0 Jan 29 11:41:51 crc kubenswrapper[4852]: I0129 11:41:51.305712 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerDied","Data":"6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444"} Jan 29 11:41:51 crc kubenswrapper[4852]: I0129 11:41:51.305746 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerStarted","Data":"202690f0ab3de4c655a913614db56b7c4a064611a69309b106a2d45f3fbe4757"} Jan 29 11:41:52 crc kubenswrapper[4852]: I0129 11:41:52.315721 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerStarted","Data":"12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a"} Jan 29 11:41:53 crc kubenswrapper[4852]: I0129 11:41:53.326696 4852 generic.go:334] "Generic (PLEG): container finished" podID="09ada291-491b-4777-a990-e772c195d2ee" containerID="12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a" exitCode=0 Jan 29 11:41:53 crc kubenswrapper[4852]: I0129 11:41:53.326757 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerDied","Data":"12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a"} Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.131210 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qpfdl"] Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.133038 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.141128 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qpfdl"] Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.284466 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-utilities\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.284523 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-catalog-content\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.285048 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t944d\" (UniqueName: \"kubernetes.io/projected/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-kube-api-access-t944d\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.340570 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerStarted","Data":"18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c"} Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.362803 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vz65p" podStartSLOduration=1.818575442 podStartE2EDuration="4.362783192s" podCreationTimestamp="2026-01-29 11:41:50 +0000 UTC" firstStartedPulling="2026-01-29 11:41:51.307297925 +0000 UTC m=+3608.524629079" lastFinishedPulling="2026-01-29 11:41:53.851505695 +0000 UTC m=+3611.068836829" observedRunningTime="2026-01-29 11:41:54.360803474 +0000 UTC m=+3611.578134608" watchObservedRunningTime="2026-01-29 11:41:54.362783192 +0000 UTC m=+3611.580114326" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.386611 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-utilities\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.386868 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-catalog-content\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.387060 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t944d\" (UniqueName: \"kubernetes.io/projected/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-kube-api-access-t944d\") pod \"community-operators-qpfdl\" (UID: 
\"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.387084 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-utilities\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.387342 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-catalog-content\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.412837 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t944d\" (UniqueName: \"kubernetes.io/projected/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-kube-api-access-t944d\") pod \"community-operators-qpfdl\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.455514 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:41:54 crc kubenswrapper[4852]: I0129 11:41:54.942648 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qpfdl"] Jan 29 11:41:55 crc kubenswrapper[4852]: I0129 11:41:55.351917 4852 generic.go:334] "Generic (PLEG): container finished" podID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerID="dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95" exitCode=0 Jan 29 11:41:55 crc kubenswrapper[4852]: I0129 11:41:55.352413 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerDied","Data":"dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95"} Jan 29 11:41:55 crc kubenswrapper[4852]: I0129 11:41:55.352454 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerStarted","Data":"c134061995d77cf79782679a5e28dc648a5ac33c8ca3b650c09a311cebfa5116"} Jan 29 11:41:56 crc kubenswrapper[4852]: I0129 11:41:56.360784 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerStarted","Data":"2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3"} Jan 29 11:41:57 crc kubenswrapper[4852]: I0129 11:41:57.376714 4852 generic.go:334] "Generic (PLEG): container finished" podID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerID="2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3" exitCode=0 Jan 29 11:41:57 crc kubenswrapper[4852]: I0129 11:41:57.376768 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerDied","Data":"2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3"} Jan 29 11:41:58 crc kubenswrapper[4852]: I0129 11:41:58.386216 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerStarted","Data":"1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7"} Jan 29 11:41:58 crc kubenswrapper[4852]: I0129 11:41:58.411914 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qpfdl" podStartSLOduration=1.957838338 podStartE2EDuration="4.411892508s" podCreationTimestamp="2026-01-29 11:41:54 +0000 UTC" firstStartedPulling="2026-01-29 11:41:55.353384807 +0000 UTC m=+3612.570715941" lastFinishedPulling="2026-01-29 11:41:57.807438977 +0000 UTC m=+3615.024770111" observedRunningTime="2026-01-29 11:41:58.406673541 +0000 UTC m=+3615.624004695" watchObservedRunningTime="2026-01-29 11:41:58.411892508 +0000 UTC m=+3615.629223662" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.017268 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.017708 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.017780 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.018855 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.018963 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" gracePeriod=600 Jan 29 11:42:00 crc kubenswrapper[4852]: E0129 11:42:00.157247 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.404357 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" exitCode=0 Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.404404 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd"} Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.404438 4852 scope.go:117] "RemoveContainer" containerID="79fbfeb75f3b45b1f47110cbbb4f15a4c98420d1649d08ab55e5c59582aa8ced" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.405167 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:42:00 crc kubenswrapper[4852]: E0129 11:42:00.405538 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.487785 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.488165 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:42:00 crc kubenswrapper[4852]: I0129 11:42:00.527965 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:42:01 crc kubenswrapper[4852]: I0129 11:42:01.472569 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:42:01 crc kubenswrapper[4852]: I0129 11:42:01.724744 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vz65p"] Jan 29 11:42:03 crc kubenswrapper[4852]: I0129 11:42:03.428221 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vz65p" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="registry-server" containerID="cri-o://18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c" gracePeriod=2 Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.347023 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.435021 4852 generic.go:334] "Generic (PLEG): container finished" podID="09ada291-491b-4777-a990-e772c195d2ee" containerID="18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c" exitCode=0 Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.435067 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerDied","Data":"18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c"} Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.435098 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vz65p" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.435099 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65p" event={"ID":"09ada291-491b-4777-a990-e772c195d2ee","Type":"ContainerDied","Data":"202690f0ab3de4c655a913614db56b7c4a064611a69309b106a2d45f3fbe4757"} Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.435110 4852 scope.go:117] "RemoveContainer" containerID="18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.443979 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hsgv\" (UniqueName: \"kubernetes.io/projected/09ada291-491b-4777-a990-e772c195d2ee-kube-api-access-6hsgv\") pod \"09ada291-491b-4777-a990-e772c195d2ee\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.444090 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-catalog-content\") pod \"09ada291-491b-4777-a990-e772c195d2ee\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.444177 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-utilities\") pod \"09ada291-491b-4777-a990-e772c195d2ee\" (UID: \"09ada291-491b-4777-a990-e772c195d2ee\") " Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.445111 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-utilities" (OuterVolumeSpecName: "utilities") pod "09ada291-491b-4777-a990-e772c195d2ee" (UID: "09ada291-491b-4777-a990-e772c195d2ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.452262 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ada291-491b-4777-a990-e772c195d2ee-kube-api-access-6hsgv" (OuterVolumeSpecName: "kube-api-access-6hsgv") pod "09ada291-491b-4777-a990-e772c195d2ee" (UID: "09ada291-491b-4777-a990-e772c195d2ee"). InnerVolumeSpecName "kube-api-access-6hsgv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.455352 4852 scope.go:117] "RemoveContainer" containerID="12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.455681 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.456663 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.486196 4852 scope.go:117] "RemoveContainer" containerID="6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.497457 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09ada291-491b-4777-a990-e772c195d2ee" (UID: "09ada291-491b-4777-a990-e772c195d2ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.506225 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.515277 4852 scope.go:117] "RemoveContainer" containerID="18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c" Jan 29 11:42:04 crc kubenswrapper[4852]: E0129 11:42:04.515714 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c\": container with ID starting with 18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c not found: ID does not exist" containerID="18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.515835 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c"} err="failed to get container status \"18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c\": rpc error: code = NotFound desc = could not find container \"18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c\": container with ID starting with 18c94d04f8a996c05371e18e0a94ec95e413d19a818e5131ab4cc9081d11c10c not found: ID does not exist" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.515930 4852 scope.go:117] "RemoveContainer" containerID="12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a" Jan 29 11:42:04 crc kubenswrapper[4852]: E0129 11:42:04.516241 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a\": container with ID starting with 12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a not found: ID does not exist" containerID="12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.516332 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a"} err="failed 
to get container status \"12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a\": rpc error: code = NotFound desc = could not find container \"12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a\": container with ID starting with 12c2353f7a911fd674cdb3cfde571880c5a048639bbbedfa5689e4ece774a14a not found: ID does not exist" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.516420 4852 scope.go:117] "RemoveContainer" containerID="6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444" Jan 29 11:42:04 crc kubenswrapper[4852]: E0129 11:42:04.516859 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444\": container with ID starting with 6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444 not found: ID does not exist" containerID="6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.516956 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444"} err="failed to get container status \"6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444\": rpc error: code = NotFound desc = could not find container \"6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444\": container with ID starting with 6852340a6fe3bb5302af3840f70b6833cea358b4f735ca437b69b7e227143444 not found: ID does not exist" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.545567 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.545639 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ada291-491b-4777-a990-e772c195d2ee-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.545653 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hsgv\" (UniqueName: \"kubernetes.io/projected/09ada291-491b-4777-a990-e772c195d2ee-kube-api-access-6hsgv\") on node \"crc\" DevicePath \"\"" Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.778916 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vz65p"] Jan 29 11:42:04 crc kubenswrapper[4852]: I0129 11:42:04.790036 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vz65p"] Jan 29 11:42:05 crc kubenswrapper[4852]: I0129 11:42:05.485936 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ada291-491b-4777-a990-e772c195d2ee" path="/var/lib/kubelet/pods/09ada291-491b-4777-a990-e772c195d2ee/volumes" Jan 29 11:42:05 crc kubenswrapper[4852]: I0129 11:42:05.511827 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:42:07 crc kubenswrapper[4852]: I0129 11:42:07.920698 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qpfdl"] Jan 29 11:42:08 crc kubenswrapper[4852]: I0129 11:42:08.469468 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qpfdl" 
podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="registry-server" containerID="cri-o://1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7" gracePeriod=2 Jan 29 11:42:08 crc kubenswrapper[4852]: I0129 11:42:08.847429 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.018861 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-catalog-content\") pod \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.019240 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t944d\" (UniqueName: \"kubernetes.io/projected/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-kube-api-access-t944d\") pod \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.019378 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-utilities\") pod \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\" (UID: \"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd\") " Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.020359 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-utilities" (OuterVolumeSpecName: "utilities") pod "d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" (UID: "d00dbc27-4bbd-4e78-a8e3-c0e727245bcd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.025911 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-kube-api-access-t944d" (OuterVolumeSpecName: "kube-api-access-t944d") pod "d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" (UID: "d00dbc27-4bbd-4e78-a8e3-c0e727245bcd"). InnerVolumeSpecName "kube-api-access-t944d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.079830 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" (UID: "d00dbc27-4bbd-4e78-a8e3-c0e727245bcd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.120739 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.120786 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t944d\" (UniqueName: \"kubernetes.io/projected/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-kube-api-access-t944d\") on node \"crc\" DevicePath \"\"" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.120802 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.486384 4852 generic.go:334] "Generic (PLEG): container finished" podID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerID="1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7" exitCode=0 Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.486439 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerDied","Data":"1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7"} Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.486444 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpfdl" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.486485 4852 scope.go:117] "RemoveContainer" containerID="1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.486472 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpfdl" event={"ID":"d00dbc27-4bbd-4e78-a8e3-c0e727245bcd","Type":"ContainerDied","Data":"c134061995d77cf79782679a5e28dc648a5ac33c8ca3b650c09a311cebfa5116"} Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.520986 4852 scope.go:117] "RemoveContainer" containerID="2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.522003 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qpfdl"] Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.532637 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qpfdl"] Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.542277 4852 scope.go:117] "RemoveContainer" containerID="dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.573265 4852 scope.go:117] "RemoveContainer" containerID="1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7" Jan 29 11:42:09 crc kubenswrapper[4852]: E0129 11:42:09.573737 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7\": container with ID starting with 1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7 not found: ID does not exist" containerID="1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.573809 
4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7"} err="failed to get container status \"1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7\": rpc error: code = NotFound desc = could not find container \"1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7\": container with ID starting with 1e02bb0c94c32bf72bbda62e743d458f4a6b429508da97354860a4549a1b14e7 not found: ID does not exist" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.573858 4852 scope.go:117] "RemoveContainer" containerID="2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3" Jan 29 11:42:09 crc kubenswrapper[4852]: E0129 11:42:09.574435 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3\": container with ID starting with 2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3 not found: ID does not exist" containerID="2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.574478 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3"} err="failed to get container status \"2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3\": rpc error: code = NotFound desc = could not find container \"2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3\": container with ID starting with 2b094e6561c008f08c4a627c5e02b8027faae533dfe94a9821c561569cc4d8f3 not found: ID does not exist" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.574505 4852 scope.go:117] "RemoveContainer" containerID="dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95" Jan 29 11:42:09 crc kubenswrapper[4852]: E0129 11:42:09.574859 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95\": container with ID starting with dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95 not found: ID does not exist" containerID="dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95" Jan 29 11:42:09 crc kubenswrapper[4852]: I0129 11:42:09.574906 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95"} err="failed to get container status \"dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95\": rpc error: code = NotFound desc = could not find container \"dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95\": container with ID starting with dadf10fe3093929fc1680aa00a418a36c3e2c56f94080326fca41e273fd01f95 not found: ID does not exist" Jan 29 11:42:11 crc kubenswrapper[4852]: I0129 11:42:11.464718 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:42:11 crc kubenswrapper[4852]: E0129 11:42:11.465572 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:42:11 crc kubenswrapper[4852]: I0129 11:42:11.476257 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" path="/var/lib/kubelet/pods/d00dbc27-4bbd-4e78-a8e3-c0e727245bcd/volumes" Jan 29 11:42:22 crc kubenswrapper[4852]: I0129 11:42:22.464256 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:42:22 crc kubenswrapper[4852]: E0129 11:42:22.465305 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:42:33 crc kubenswrapper[4852]: I0129 11:42:33.471753 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:42:33 crc kubenswrapper[4852]: E0129 11:42:33.472651 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:42:45 crc kubenswrapper[4852]: I0129 11:42:45.464085 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:42:45 crc kubenswrapper[4852]: E0129 11:42:45.465362 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:42:56 crc kubenswrapper[4852]: I0129 11:42:56.463759 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:42:56 crc kubenswrapper[4852]: E0129 11:42:56.464628 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:43:11 crc kubenswrapper[4852]: I0129 11:43:11.464105 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:43:11 crc kubenswrapper[4852]: E0129 11:43:11.464998 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:43:22 crc kubenswrapper[4852]: I0129 11:43:22.463657 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:43:22 crc kubenswrapper[4852]: E0129 11:43:22.464458 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:43:35 crc kubenswrapper[4852]: I0129 11:43:35.464078 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:43:35 crc kubenswrapper[4852]: E0129 11:43:35.466207 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:43:48 crc kubenswrapper[4852]: I0129 11:43:48.464359 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:43:48 crc kubenswrapper[4852]: E0129 11:43:48.465291 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:44:00 crc kubenswrapper[4852]: I0129 11:44:00.463508 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:44:00 crc kubenswrapper[4852]: E0129 11:44:00.464565 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:44:14 crc kubenswrapper[4852]: I0129 11:44:14.464954 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:44:14 crc kubenswrapper[4852]: E0129 11:44:14.465978 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:44:27 crc kubenswrapper[4852]: I0129 11:44:27.464696 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:44:27 crc kubenswrapper[4852]: E0129 11:44:27.465666 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:44:42 crc kubenswrapper[4852]: I0129 11:44:42.464172 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:44:42 crc kubenswrapper[4852]: E0129 11:44:42.465135 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:44:56 crc kubenswrapper[4852]: I0129 11:44:56.464096 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:44:56 crc kubenswrapper[4852]: E0129 11:44:56.466320 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.192120 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd"] Jan 29 11:45:00 crc kubenswrapper[4852]: E0129 11:45:00.192936 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="extract-utilities" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.192956 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="extract-utilities" Jan 29 11:45:00 crc kubenswrapper[4852]: E0129 11:45:00.192973 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="extract-content" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.192983 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="extract-content" Jan 29 11:45:00 crc kubenswrapper[4852]: E0129 11:45:00.193008 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="extract-utilities" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.193018 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="extract-utilities" Jan 29 11:45:00 crc 
kubenswrapper[4852]: E0129 11:45:00.193043 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="registry-server" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.193052 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="registry-server" Jan 29 11:45:00 crc kubenswrapper[4852]: E0129 11:45:00.193069 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="extract-content" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.193079 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="extract-content" Jan 29 11:45:00 crc kubenswrapper[4852]: E0129 11:45:00.193105 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="registry-server" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.193117 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="registry-server" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.193351 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d00dbc27-4bbd-4e78-a8e3-c0e727245bcd" containerName="registry-server" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.193384 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="09ada291-491b-4777-a990-e772c195d2ee" containerName="registry-server" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.194107 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.197327 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.198004 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.200011 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd"] Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.307017 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-config-volume\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.307090 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-secret-volume\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.307118 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lszs5\" (UniqueName: \"kubernetes.io/projected/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-kube-api-access-lszs5\") 
pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.408945 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-secret-volume\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.409003 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lszs5\" (UniqueName: \"kubernetes.io/projected/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-kube-api-access-lszs5\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.409077 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-config-volume\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.410457 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-config-volume\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.419392 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-secret-volume\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.428320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lszs5\" (UniqueName: \"kubernetes.io/projected/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-kube-api-access-lszs5\") pod \"collect-profiles-29494785-tg7cd\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.516896 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.747097 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd"] Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.962008 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" event={"ID":"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454","Type":"ContainerStarted","Data":"4b0400d42bc46f4768d249663bbb5992750075cf8635716db5156b8b1b2e73b8"} Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.962056 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" event={"ID":"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454","Type":"ContainerStarted","Data":"5feab05eda12649a7d98c993b26b19eaf94fcb70742e1b46b7c22366f5912fb3"} Jan 29 11:45:00 crc kubenswrapper[4852]: I0129 11:45:00.978825 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" podStartSLOduration=0.978804654 podStartE2EDuration="978.804654ms" podCreationTimestamp="2026-01-29 11:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 11:45:00.977809849 +0000 UTC m=+3798.195140993" watchObservedRunningTime="2026-01-29 11:45:00.978804654 +0000 UTC m=+3798.196135788" Jan 29 11:45:01 crc kubenswrapper[4852]: I0129 11:45:01.972525 4852 generic.go:334] "Generic (PLEG): container finished" podID="0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" containerID="4b0400d42bc46f4768d249663bbb5992750075cf8635716db5156b8b1b2e73b8" exitCode=0 Jan 29 11:45:01 crc kubenswrapper[4852]: I0129 11:45:01.972862 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" event={"ID":"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454","Type":"ContainerDied","Data":"4b0400d42bc46f4768d249663bbb5992750075cf8635716db5156b8b1b2e73b8"} Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.235320 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.355698 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-secret-volume\") pod \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.356091 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lszs5\" (UniqueName: \"kubernetes.io/projected/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-kube-api-access-lszs5\") pod \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.356317 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-config-volume\") pod \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\" (UID: \"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454\") " Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.356826 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-config-volume" (OuterVolumeSpecName: "config-volume") pod "0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" (UID: "0d12e5a7-a0e7-4c98-bdb0-9a697ad80454"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.362659 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-kube-api-access-lszs5" (OuterVolumeSpecName: "kube-api-access-lszs5") pod "0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" (UID: "0d12e5a7-a0e7-4c98-bdb0-9a697ad80454"). InnerVolumeSpecName "kube-api-access-lszs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.364785 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" (UID: "0d12e5a7-a0e7-4c98-bdb0-9a697ad80454"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.461953 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.461989 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.461999 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lszs5\" (UniqueName: \"kubernetes.io/projected/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454-kube-api-access-lszs5\") on node \"crc\" DevicePath \"\"" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.989131 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" event={"ID":"0d12e5a7-a0e7-4c98-bdb0-9a697ad80454","Type":"ContainerDied","Data":"5feab05eda12649a7d98c993b26b19eaf94fcb70742e1b46b7c22366f5912fb3"} Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.989178 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5feab05eda12649a7d98c993b26b19eaf94fcb70742e1b46b7c22366f5912fb3" Jan 29 11:45:03 crc kubenswrapper[4852]: I0129 11:45:03.989225 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd" Jan 29 11:45:04 crc kubenswrapper[4852]: I0129 11:45:04.312897 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz"] Jan 29 11:45:04 crc kubenswrapper[4852]: I0129 11:45:04.317846 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494740-mwtrz"] Jan 29 11:45:05 crc kubenswrapper[4852]: I0129 11:45:05.475870 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d1dac7e-d83f-4d2f-bc9f-fd197a406a47" path="/var/lib/kubelet/pods/4d1dac7e-d83f-4d2f-bc9f-fd197a406a47/volumes" Jan 29 11:45:11 crc kubenswrapper[4852]: I0129 11:45:11.463373 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:45:11 crc kubenswrapper[4852]: E0129 11:45:11.464024 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:45:22 crc kubenswrapper[4852]: I0129 11:45:22.463273 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:45:22 crc kubenswrapper[4852]: E0129 11:45:22.464287 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:45:36 crc kubenswrapper[4852]: I0129 11:45:36.464187 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:45:36 crc kubenswrapper[4852]: E0129 11:45:36.464857 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:45:48 crc kubenswrapper[4852]: I0129 11:45:48.631492 4852 scope.go:117] "RemoveContainer" containerID="cacc58bc0392c9c8309ea56f2b407ffb1227c73dfbc8cdc947cccef4d096e1a3" Jan 29 11:45:50 crc kubenswrapper[4852]: I0129 11:45:50.463698 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:45:50 crc kubenswrapper[4852]: E0129 11:45:50.464505 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:46:01 crc kubenswrapper[4852]: I0129 11:46:01.463423 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:46:01 crc kubenswrapper[4852]: E0129 11:46:01.464221 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:46:12 crc kubenswrapper[4852]: I0129 11:46:12.463798 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:46:12 crc kubenswrapper[4852]: E0129 11:46:12.464492 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:46:25 crc kubenswrapper[4852]: I0129 11:46:25.464156 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:46:25 crc kubenswrapper[4852]: E0129 11:46:25.465119 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:46:36 crc kubenswrapper[4852]: I0129 11:46:36.463894 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:46:36 crc kubenswrapper[4852]: E0129 11:46:36.464558 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:46:51 crc kubenswrapper[4852]: I0129 11:46:51.464120 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:46:51 crc kubenswrapper[4852]: E0129 11:46:51.465521 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:47:02 crc kubenswrapper[4852]: I0129 11:47:02.463738 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:47:02 crc kubenswrapper[4852]: I0129 11:47:02.918423 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"8ff3dcc878e656e053194c07a1eb5a678217e03a4d7bece92d79c035f902e2a6"} Jan 29 11:49:30 crc kubenswrapper[4852]: I0129 11:49:30.017524 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:49:30 crc kubenswrapper[4852]: I0129 11:49:30.021471 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:50:00 crc kubenswrapper[4852]: I0129 11:50:00.017357 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:50:00 crc kubenswrapper[4852]: I0129 11:50:00.018170 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.016993 4852 
patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.017536 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.017591 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.018184 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ff3dcc878e656e053194c07a1eb5a678217e03a4d7bece92d79c035f902e2a6"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.018242 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://8ff3dcc878e656e053194c07a1eb5a678217e03a4d7bece92d79c035f902e2a6" gracePeriod=600 Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.806311 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="8ff3dcc878e656e053194c07a1eb5a678217e03a4d7bece92d79c035f902e2a6" exitCode=0 Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.806377 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"8ff3dcc878e656e053194c07a1eb5a678217e03a4d7bece92d79c035f902e2a6"} Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.806837 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b"} Jan 29 11:50:30 crc kubenswrapper[4852]: I0129 11:50:30.806859 4852 scope.go:117] "RemoveContainer" containerID="0cce7fce3fc292f9a16f6a65691e6b87910dc7b2be097d8062c0bd6cf481e3fd" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.495003 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pp5w7"] Jan 29 11:52:19 crc kubenswrapper[4852]: E0129 11:52:19.496087 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" containerName="collect-profiles" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.496109 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" containerName="collect-profiles" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.496456 4852 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" containerName="collect-profiles" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.498409 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.504773 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pp5w7"] Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.598734 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7hwz\" (UniqueName: \"kubernetes.io/projected/4472e6c8-2442-4864-97f5-d41c4790bf2e-kube-api-access-z7hwz\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.598862 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-catalog-content\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.598889 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-utilities\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.700064 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-catalog-content\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.700329 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-utilities\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.700453 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7hwz\" (UniqueName: \"kubernetes.io/projected/4472e6c8-2442-4864-97f5-d41c4790bf2e-kube-api-access-z7hwz\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.700569 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-catalog-content\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.700776 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-utilities\") pod \"community-operators-pp5w7\" (UID: 
\"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.726846 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7hwz\" (UniqueName: \"kubernetes.io/projected/4472e6c8-2442-4864-97f5-d41c4790bf2e-kube-api-access-z7hwz\") pod \"community-operators-pp5w7\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:19 crc kubenswrapper[4852]: I0129 11:52:19.824154 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:20 crc kubenswrapper[4852]: I0129 11:52:20.291625 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pp5w7"] Jan 29 11:52:20 crc kubenswrapper[4852]: I0129 11:52:20.786350 4852 generic.go:334] "Generic (PLEG): container finished" podID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerID="cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b" exitCode=0 Jan 29 11:52:20 crc kubenswrapper[4852]: I0129 11:52:20.786424 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerDied","Data":"cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b"} Jan 29 11:52:20 crc kubenswrapper[4852]: I0129 11:52:20.786683 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerStarted","Data":"8f3c70af615b930acf19512316415ba8699ace2dac72022cb676262007122a18"} Jan 29 11:52:20 crc kubenswrapper[4852]: I0129 11:52:20.788779 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:52:21 crc kubenswrapper[4852]: I0129 11:52:21.795023 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerStarted","Data":"abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6"} Jan 29 11:52:22 crc kubenswrapper[4852]: I0129 11:52:22.809164 4852 generic.go:334] "Generic (PLEG): container finished" podID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerID="abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6" exitCode=0 Jan 29 11:52:22 crc kubenswrapper[4852]: I0129 11:52:22.809430 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerDied","Data":"abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6"} Jan 29 11:52:23 crc kubenswrapper[4852]: I0129 11:52:23.822454 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerStarted","Data":"f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3"} Jan 29 11:52:23 crc kubenswrapper[4852]: I0129 11:52:23.842330 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pp5w7" podStartSLOduration=2.375001297 podStartE2EDuration="4.842305691s" podCreationTimestamp="2026-01-29 11:52:19 +0000 UTC" firstStartedPulling="2026-01-29 11:52:20.788467797 +0000 UTC m=+4238.005798931" 
lastFinishedPulling="2026-01-29 11:52:23.255772171 +0000 UTC m=+4240.473103325" observedRunningTime="2026-01-29 11:52:23.839339859 +0000 UTC m=+4241.056671033" watchObservedRunningTime="2026-01-29 11:52:23.842305691 +0000 UTC m=+4241.059636855" Jan 29 11:52:26 crc kubenswrapper[4852]: I0129 11:52:26.870652 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nnvzd"] Jan 29 11:52:26 crc kubenswrapper[4852]: I0129 11:52:26.874072 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:26 crc kubenswrapper[4852]: I0129 11:52:26.885852 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nnvzd"] Jan 29 11:52:26 crc kubenswrapper[4852]: I0129 11:52:26.911348 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-utilities\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:26 crc kubenswrapper[4852]: I0129 11:52:26.911436 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-catalog-content\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:26 crc kubenswrapper[4852]: I0129 11:52:26.911526 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ddzf\" (UniqueName: \"kubernetes.io/projected/2c7c0a23-394b-47f5-88e4-442e622874f3-kube-api-access-5ddzf\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.012808 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-utilities\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.012877 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-catalog-content\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.012923 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ddzf\" (UniqueName: \"kubernetes.io/projected/2c7c0a23-394b-47f5-88e4-442e622874f3-kube-api-access-5ddzf\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.013378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-utilities\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " 
pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.013512 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-catalog-content\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.038471 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ddzf\" (UniqueName: \"kubernetes.io/projected/2c7c0a23-394b-47f5-88e4-442e622874f3-kube-api-access-5ddzf\") pod \"certified-operators-nnvzd\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.211702 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.743824 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nnvzd"] Jan 29 11:52:27 crc kubenswrapper[4852]: I0129 11:52:27.866046 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerStarted","Data":"804fff37b187876a4f0a5890af3f6406c3fc5f7d621638d0dc7184aac0cb6f71"} Jan 29 11:52:28 crc kubenswrapper[4852]: I0129 11:52:28.877455 4852 generic.go:334] "Generic (PLEG): container finished" podID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerID="db4ef81a994f2559ade11f85b23b7e522605117e763b1e7d8cf3d8bc1f8b5bdc" exitCode=0 Jan 29 11:52:28 crc kubenswrapper[4852]: I0129 11:52:28.877501 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerDied","Data":"db4ef81a994f2559ade11f85b23b7e522605117e763b1e7d8cf3d8bc1f8b5bdc"} Jan 29 11:52:29 crc kubenswrapper[4852]: I0129 11:52:29.825353 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:29 crc kubenswrapper[4852]: I0129 11:52:29.825853 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:29 crc kubenswrapper[4852]: I0129 11:52:29.890352 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerStarted","Data":"4dbb86833a3e47f81789d9b1e6d903bfc1e25168dbae1b2001cd75f9027507c0"} Jan 29 11:52:29 crc kubenswrapper[4852]: I0129 11:52:29.907367 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:29 crc kubenswrapper[4852]: I0129 11:52:29.988444 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:30 crc kubenswrapper[4852]: I0129 11:52:30.018426 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 
29 11:52:30 crc kubenswrapper[4852]: I0129 11:52:30.018517 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:52:30 crc kubenswrapper[4852]: I0129 11:52:30.905784 4852 generic.go:334] "Generic (PLEG): container finished" podID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerID="4dbb86833a3e47f81789d9b1e6d903bfc1e25168dbae1b2001cd75f9027507c0" exitCode=0 Jan 29 11:52:30 crc kubenswrapper[4852]: I0129 11:52:30.906009 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerDied","Data":"4dbb86833a3e47f81789d9b1e6d903bfc1e25168dbae1b2001cd75f9027507c0"} Jan 29 11:52:31 crc kubenswrapper[4852]: I0129 11:52:31.919249 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerStarted","Data":"e1a21f5e69312c4f80e2d2b103fe542700a979703de061edb2ae2b6c2aaadb03"} Jan 29 11:52:31 crc kubenswrapper[4852]: I0129 11:52:31.943450 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nnvzd" podStartSLOduration=3.5041248019999998 podStartE2EDuration="5.943427253s" podCreationTimestamp="2026-01-29 11:52:26 +0000 UTC" firstStartedPulling="2026-01-29 11:52:28.87976273 +0000 UTC m=+4246.097093864" lastFinishedPulling="2026-01-29 11:52:31.319065151 +0000 UTC m=+4248.536396315" observedRunningTime="2026-01-29 11:52:31.940301947 +0000 UTC m=+4249.157633091" watchObservedRunningTime="2026-01-29 11:52:31.943427253 +0000 UTC m=+4249.160758387" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.242116 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pp5w7"] Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.242944 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pp5w7" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="registry-server" containerID="cri-o://f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3" gracePeriod=2 Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.699402 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.722128 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-catalog-content\") pod \"4472e6c8-2442-4864-97f5-d41c4790bf2e\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.722191 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-utilities\") pod \"4472e6c8-2442-4864-97f5-d41c4790bf2e\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.722265 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7hwz\" (UniqueName: \"kubernetes.io/projected/4472e6c8-2442-4864-97f5-d41c4790bf2e-kube-api-access-z7hwz\") pod \"4472e6c8-2442-4864-97f5-d41c4790bf2e\" (UID: \"4472e6c8-2442-4864-97f5-d41c4790bf2e\") " Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.724661 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-utilities" (OuterVolumeSpecName: "utilities") pod "4472e6c8-2442-4864-97f5-d41c4790bf2e" (UID: "4472e6c8-2442-4864-97f5-d41c4790bf2e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.724999 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.730045 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4472e6c8-2442-4864-97f5-d41c4790bf2e-kube-api-access-z7hwz" (OuterVolumeSpecName: "kube-api-access-z7hwz") pod "4472e6c8-2442-4864-97f5-d41c4790bf2e" (UID: "4472e6c8-2442-4864-97f5-d41c4790bf2e"). InnerVolumeSpecName "kube-api-access-z7hwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.781542 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4472e6c8-2442-4864-97f5-d41c4790bf2e" (UID: "4472e6c8-2442-4864-97f5-d41c4790bf2e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.826726 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4472e6c8-2442-4864-97f5-d41c4790bf2e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.826759 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7hwz\" (UniqueName: \"kubernetes.io/projected/4472e6c8-2442-4864-97f5-d41c4790bf2e-kube-api-access-z7hwz\") on node \"crc\" DevicePath \"\"" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.927731 4852 generic.go:334] "Generic (PLEG): container finished" podID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerID="f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3" exitCode=0 Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.927819 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerDied","Data":"f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3"} Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.927857 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pp5w7" event={"ID":"4472e6c8-2442-4864-97f5-d41c4790bf2e","Type":"ContainerDied","Data":"8f3c70af615b930acf19512316415ba8699ace2dac72022cb676262007122a18"} Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.927875 4852 scope.go:117] "RemoveContainer" containerID="f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.927998 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pp5w7" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.946270 4852 scope.go:117] "RemoveContainer" containerID="abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.956729 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pp5w7"] Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.962307 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pp5w7"] Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.976207 4852 scope.go:117] "RemoveContainer" containerID="cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.992001 4852 scope.go:117] "RemoveContainer" containerID="f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3" Jan 29 11:52:32 crc kubenswrapper[4852]: E0129 11:52:32.992347 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3\": container with ID starting with f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3 not found: ID does not exist" containerID="f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.992372 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3"} err="failed to get container status \"f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3\": rpc error: code = NotFound desc = could not find container \"f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3\": container with ID starting with f32ea3863a5324bba0300c1860b79e76a80c707668df8192cfdb6b621e68c7a3 not found: ID does not exist" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.992392 4852 scope.go:117] "RemoveContainer" containerID="abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6" Jan 29 11:52:32 crc kubenswrapper[4852]: E0129 11:52:32.992675 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6\": container with ID starting with abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6 not found: ID does not exist" containerID="abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.992706 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6"} err="failed to get container status \"abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6\": rpc error: code = NotFound desc = could not find container \"abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6\": container with ID starting with abad0caf6b82294577aef7a6a6d48949c065322d8e78843c549e342aca682cc6 not found: ID does not exist" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.992719 4852 scope.go:117] "RemoveContainer" containerID="cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b" Jan 29 11:52:32 crc kubenswrapper[4852]: E0129 11:52:32.993397 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b\": container with ID starting with cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b not found: ID does not exist" containerID="cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b" Jan 29 11:52:32 crc kubenswrapper[4852]: I0129 11:52:32.993447 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b"} err="failed to get container status \"cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b\": rpc error: code = NotFound desc = could not find container \"cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b\": container with ID starting with cadc4d2811780b33cb065ffbaf65bb1b1bfed61f9fd7390cdcf7ac7adcb62a0b not found: ID does not exist" Jan 29 11:52:33 crc kubenswrapper[4852]: I0129 11:52:33.480498 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" path="/var/lib/kubelet/pods/4472e6c8-2442-4864-97f5-d41c4790bf2e/volumes" Jan 29 11:52:37 crc kubenswrapper[4852]: I0129 11:52:37.212313 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:37 crc kubenswrapper[4852]: I0129 11:52:37.212798 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:37 crc kubenswrapper[4852]: I0129 11:52:37.279619 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:38 crc kubenswrapper[4852]: I0129 11:52:38.014576 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:38 crc kubenswrapper[4852]: I0129 11:52:38.237321 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nnvzd"] Jan 29 11:52:39 crc kubenswrapper[4852]: I0129 11:52:39.986428 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nnvzd" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="registry-server" containerID="cri-o://e1a21f5e69312c4f80e2d2b103fe542700a979703de061edb2ae2b6c2aaadb03" gracePeriod=2 Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.002949 4852 generic.go:334] "Generic (PLEG): container finished" podID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerID="e1a21f5e69312c4f80e2d2b103fe542700a979703de061edb2ae2b6c2aaadb03" exitCode=0 Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.003014 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerDied","Data":"e1a21f5e69312c4f80e2d2b103fe542700a979703de061edb2ae2b6c2aaadb03"} Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.319251 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.461827 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ddzf\" (UniqueName: \"kubernetes.io/projected/2c7c0a23-394b-47f5-88e4-442e622874f3-kube-api-access-5ddzf\") pod \"2c7c0a23-394b-47f5-88e4-442e622874f3\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.462182 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-catalog-content\") pod \"2c7c0a23-394b-47f5-88e4-442e622874f3\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.463733 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-utilities\") pod \"2c7c0a23-394b-47f5-88e4-442e622874f3\" (UID: \"2c7c0a23-394b-47f5-88e4-442e622874f3\") " Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.465060 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-utilities" (OuterVolumeSpecName: "utilities") pod "2c7c0a23-394b-47f5-88e4-442e622874f3" (UID: "2c7c0a23-394b-47f5-88e4-442e622874f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.468013 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c7c0a23-394b-47f5-88e4-442e622874f3-kube-api-access-5ddzf" (OuterVolumeSpecName: "kube-api-access-5ddzf") pod "2c7c0a23-394b-47f5-88e4-442e622874f3" (UID: "2c7c0a23-394b-47f5-88e4-442e622874f3"). InnerVolumeSpecName "kube-api-access-5ddzf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.566085 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.566117 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ddzf\" (UniqueName: \"kubernetes.io/projected/2c7c0a23-394b-47f5-88e4-442e622874f3-kube-api-access-5ddzf\") on node \"crc\" DevicePath \"\"" Jan 29 11:52:41 crc kubenswrapper[4852]: I0129 11:52:41.981024 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c7c0a23-394b-47f5-88e4-442e622874f3" (UID: "2c7c0a23-394b-47f5-88e4-442e622874f3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.012436 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nnvzd" event={"ID":"2c7c0a23-394b-47f5-88e4-442e622874f3","Type":"ContainerDied","Data":"804fff37b187876a4f0a5890af3f6406c3fc5f7d621638d0dc7184aac0cb6f71"} Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.012506 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nnvzd" Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.012526 4852 scope.go:117] "RemoveContainer" containerID="e1a21f5e69312c4f80e2d2b103fe542700a979703de061edb2ae2b6c2aaadb03" Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.034198 4852 scope.go:117] "RemoveContainer" containerID="4dbb86833a3e47f81789d9b1e6d903bfc1e25168dbae1b2001cd75f9027507c0" Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.077117 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nnvzd"] Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.079604 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c7c0a23-394b-47f5-88e4-442e622874f3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.086091 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nnvzd"] Jan 29 11:52:42 crc kubenswrapper[4852]: I0129 11:52:42.089952 4852 scope.go:117] "RemoveContainer" containerID="db4ef81a994f2559ade11f85b23b7e522605117e763b1e7d8cf3d8bc1f8b5bdc" Jan 29 11:52:43 crc kubenswrapper[4852]: I0129 11:52:43.484827 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" path="/var/lib/kubelet/pods/2c7c0a23-394b-47f5-88e4-442e622874f3/volumes" Jan 29 11:53:00 crc kubenswrapper[4852]: I0129 11:53:00.017277 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:53:00 crc kubenswrapper[4852]: I0129 11:53:00.017996 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.017012 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.017771 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.017842 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.018753 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.018847 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" gracePeriod=600 Jan 29 11:53:30 crc kubenswrapper[4852]: E0129 11:53:30.156163 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.452355 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" exitCode=0 Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.452446 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b"} Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.452754 4852 scope.go:117] "RemoveContainer" containerID="8ff3dcc878e656e053194c07a1eb5a678217e03a4d7bece92d79c035f902e2a6" Jan 29 11:53:30 crc kubenswrapper[4852]: I0129 11:53:30.453279 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:53:30 crc kubenswrapper[4852]: E0129 11:53:30.453534 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:53:43 crc kubenswrapper[4852]: I0129 11:53:43.473545 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:53:43 crc kubenswrapper[4852]: E0129 11:53:43.474317 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:53:55 crc kubenswrapper[4852]: I0129 11:53:55.464038 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:53:55 crc kubenswrapper[4852]: E0129 11:53:55.466135 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:54:10 crc kubenswrapper[4852]: I0129 11:54:10.463994 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:54:10 crc kubenswrapper[4852]: E0129 11:54:10.464687 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:54:23 crc kubenswrapper[4852]: I0129 11:54:23.489890 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:54:23 crc kubenswrapper[4852]: E0129 11:54:23.490935 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:54:35 crc kubenswrapper[4852]: I0129 11:54:35.464885 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:54:35 crc kubenswrapper[4852]: E0129 11:54:35.466028 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:54:49 crc kubenswrapper[4852]: I0129 11:54:49.464216 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:54:49 crc kubenswrapper[4852]: E0129 11:54:49.465302 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:55:00 crc kubenswrapper[4852]: I0129 11:55:00.464133 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:55:00 crc kubenswrapper[4852]: E0129 11:55:00.465113 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:55:12 crc kubenswrapper[4852]: I0129 11:55:12.465407 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:55:12 crc kubenswrapper[4852]: E0129 11:55:12.466672 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:55:26 crc kubenswrapper[4852]: I0129 11:55:26.463743 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:55:26 crc kubenswrapper[4852]: E0129 11:55:26.464900 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:55:40 crc kubenswrapper[4852]: I0129 11:55:40.463460 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:55:40 crc kubenswrapper[4852]: E0129 11:55:40.464293 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:55:55 crc kubenswrapper[4852]: I0129 11:55:55.463684 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:55:55 crc kubenswrapper[4852]: E0129 11:55:55.464648 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:56:07 crc kubenswrapper[4852]: I0129 11:56:07.463515 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:56:07 crc kubenswrapper[4852]: E0129 11:56:07.464760 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:56:20 crc kubenswrapper[4852]: I0129 11:56:20.463409 4852 scope.go:117] "RemoveContainer" 
containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:56:20 crc kubenswrapper[4852]: E0129 11:56:20.464293 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:56:32 crc kubenswrapper[4852]: I0129 11:56:32.464258 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:56:32 crc kubenswrapper[4852]: E0129 11:56:32.465720 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.424423 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-mprg5"] Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.436764 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-mprg5"] Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.542854 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-scw8h"] Jan 29 11:56:46 crc kubenswrapper[4852]: E0129 11:56:46.543813 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="extract-content" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.543849 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="extract-content" Jan 29 11:56:46 crc kubenswrapper[4852]: E0129 11:56:46.543870 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="registry-server" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.543882 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="registry-server" Jan 29 11:56:46 crc kubenswrapper[4852]: E0129 11:56:46.544286 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="extract-utilities" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.544401 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="extract-utilities" Jan 29 11:56:46 crc kubenswrapper[4852]: E0129 11:56:46.544425 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="registry-server" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.544438 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="registry-server" Jan 29 11:56:46 crc kubenswrapper[4852]: E0129 11:56:46.544672 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="extract-content" Jan 29 11:56:46 crc 
kubenswrapper[4852]: I0129 11:56:46.544716 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="extract-content" Jan 29 11:56:46 crc kubenswrapper[4852]: E0129 11:56:46.544756 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="extract-utilities" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.544770 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="extract-utilities" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.545200 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c7c0a23-394b-47f5-88e4-442e622874f3" containerName="registry-server" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.545251 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4472e6c8-2442-4864-97f5-d41c4790bf2e" containerName="registry-server" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.545959 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.551813 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.552060 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.552184 4852 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-4ld2w" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.552452 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.563808 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-scw8h"] Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.621453 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-crc-storage\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.621538 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfjlc\" (UniqueName: \"kubernetes.io/projected/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-kube-api-access-bfjlc\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.621577 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-node-mnt\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.722772 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-crc-storage\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 
11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.722849 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfjlc\" (UniqueName: \"kubernetes.io/projected/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-kube-api-access-bfjlc\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.723063 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-node-mnt\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.723667 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-node-mnt\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.725730 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-crc-storage\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.747873 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfjlc\" (UniqueName: \"kubernetes.io/projected/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-kube-api-access-bfjlc\") pod \"crc-storage-crc-scw8h\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:46 crc kubenswrapper[4852]: I0129 11:56:46.867365 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:47 crc kubenswrapper[4852]: I0129 11:56:47.333686 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-scw8h"] Jan 29 11:56:47 crc kubenswrapper[4852]: I0129 11:56:47.464187 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:56:47 crc kubenswrapper[4852]: E0129 11:56:47.464669 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:56:47 crc kubenswrapper[4852]: I0129 11:56:47.473386 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b0e648-79cf-41be-82ff-1850c3dd519d" path="/var/lib/kubelet/pods/79b0e648-79cf-41be-82ff-1850c3dd519d/volumes" Jan 29 11:56:48 crc kubenswrapper[4852]: I0129 11:56:48.128358 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-scw8h" event={"ID":"6db3baf2-33d6-43eb-a8ec-2fda8cac689c","Type":"ContainerStarted","Data":"d4dc398c5aad1e544aa8bf58e8c2c0880359bc98f5dd529793c6aa82c7582234"} Jan 29 11:56:48 crc kubenswrapper[4852]: I0129 11:56:48.128833 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-scw8h" event={"ID":"6db3baf2-33d6-43eb-a8ec-2fda8cac689c","Type":"ContainerStarted","Data":"30aa6ae8cfd9f2f59315e5d5c54759b019fd0dd9961ed4ad38e1a10636c1e156"} Jan 29 11:56:48 crc kubenswrapper[4852]: I0129 11:56:48.152602 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="crc-storage/crc-storage-crc-scw8h" podStartSLOduration=1.635703509 podStartE2EDuration="2.152561913s" podCreationTimestamp="2026-01-29 11:56:46 +0000 UTC" firstStartedPulling="2026-01-29 11:56:47.347297816 +0000 UTC m=+4504.564628960" lastFinishedPulling="2026-01-29 11:56:47.86415621 +0000 UTC m=+4505.081487364" observedRunningTime="2026-01-29 11:56:48.149795765 +0000 UTC m=+4505.367126909" watchObservedRunningTime="2026-01-29 11:56:48.152561913 +0000 UTC m=+4505.369893047" Jan 29 11:56:48 crc kubenswrapper[4852]: I0129 11:56:48.843496 4852 scope.go:117] "RemoveContainer" containerID="11456debc4eb2650e6db68c2991e53f1c5827ebfcc4a4d2dab561ab0bd5fa57c" Jan 29 11:56:49 crc kubenswrapper[4852]: I0129 11:56:49.136859 4852 generic.go:334] "Generic (PLEG): container finished" podID="6db3baf2-33d6-43eb-a8ec-2fda8cac689c" containerID="d4dc398c5aad1e544aa8bf58e8c2c0880359bc98f5dd529793c6aa82c7582234" exitCode=0 Jan 29 11:56:49 crc kubenswrapper[4852]: I0129 11:56:49.136941 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-scw8h" event={"ID":"6db3baf2-33d6-43eb-a8ec-2fda8cac689c","Type":"ContainerDied","Data":"d4dc398c5aad1e544aa8bf58e8c2c0880359bc98f5dd529793c6aa82c7582234"} Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.472210 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.599726 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-node-mnt\") pod \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.599851 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-crc-storage\") pod \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.599854 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "6db3baf2-33d6-43eb-a8ec-2fda8cac689c" (UID: "6db3baf2-33d6-43eb-a8ec-2fda8cac689c"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.599963 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfjlc\" (UniqueName: \"kubernetes.io/projected/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-kube-api-access-bfjlc\") pod \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\" (UID: \"6db3baf2-33d6-43eb-a8ec-2fda8cac689c\") " Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.600281 4852 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.605741 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-kube-api-access-bfjlc" (OuterVolumeSpecName: "kube-api-access-bfjlc") pod "6db3baf2-33d6-43eb-a8ec-2fda8cac689c" (UID: "6db3baf2-33d6-43eb-a8ec-2fda8cac689c"). InnerVolumeSpecName "kube-api-access-bfjlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.622338 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "6db3baf2-33d6-43eb-a8ec-2fda8cac689c" (UID: "6db3baf2-33d6-43eb-a8ec-2fda8cac689c"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.701205 4852 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 29 11:56:50 crc kubenswrapper[4852]: I0129 11:56:50.701239 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfjlc\" (UniqueName: \"kubernetes.io/projected/6db3baf2-33d6-43eb-a8ec-2fda8cac689c-kube-api-access-bfjlc\") on node \"crc\" DevicePath \"\"" Jan 29 11:56:51 crc kubenswrapper[4852]: I0129 11:56:51.158901 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-scw8h" event={"ID":"6db3baf2-33d6-43eb-a8ec-2fda8cac689c","Type":"ContainerDied","Data":"30aa6ae8cfd9f2f59315e5d5c54759b019fd0dd9961ed4ad38e1a10636c1e156"} Jan 29 11:56:51 crc kubenswrapper[4852]: I0129 11:56:51.158956 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30aa6ae8cfd9f2f59315e5d5c54759b019fd0dd9961ed4ad38e1a10636c1e156" Jan 29 11:56:51 crc kubenswrapper[4852]: I0129 11:56:51.158988 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-scw8h" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.164590 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-scw8h"] Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.169636 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-scw8h"] Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.287452 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-pc5nv"] Jan 29 11:56:52 crc kubenswrapper[4852]: E0129 11:56:52.288705 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6db3baf2-33d6-43eb-a8ec-2fda8cac689c" containerName="storage" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.288742 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6db3baf2-33d6-43eb-a8ec-2fda8cac689c" containerName="storage" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.289094 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6db3baf2-33d6-43eb-a8ec-2fda8cac689c" containerName="storage" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.290126 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.293307 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.293917 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.294166 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.294577 4852 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-4ld2w" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.298410 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-pc5nv"] Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.328040 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-crc-storage\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.328114 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lcrg\" (UniqueName: \"kubernetes.io/projected/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-kube-api-access-8lcrg\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.328184 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-node-mnt\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.429718 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-node-mnt\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.430194 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-node-mnt\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.430204 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-crc-storage\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.430393 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lcrg\" (UniqueName: \"kubernetes.io/projected/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-kube-api-access-8lcrg\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " 
pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.431407 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-crc-storage\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.859748 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lcrg\" (UniqueName: \"kubernetes.io/projected/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-kube-api-access-8lcrg\") pod \"crc-storage-crc-pc5nv\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:52 crc kubenswrapper[4852]: I0129 11:56:52.914963 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:53 crc kubenswrapper[4852]: I0129 11:56:53.381334 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-pc5nv"] Jan 29 11:56:53 crc kubenswrapper[4852]: I0129 11:56:53.479358 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6db3baf2-33d6-43eb-a8ec-2fda8cac689c" path="/var/lib/kubelet/pods/6db3baf2-33d6-43eb-a8ec-2fda8cac689c/volumes" Jan 29 11:56:54 crc kubenswrapper[4852]: I0129 11:56:54.181487 4852 generic.go:334] "Generic (PLEG): container finished" podID="be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" containerID="9f0eb77f1e33e37915a2e14793d130beb2cb61b721ef6b8a077faf85fb45d8eb" exitCode=0 Jan 29 11:56:54 crc kubenswrapper[4852]: I0129 11:56:54.181685 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-pc5nv" event={"ID":"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c","Type":"ContainerDied","Data":"9f0eb77f1e33e37915a2e14793d130beb2cb61b721ef6b8a077faf85fb45d8eb"} Jan 29 11:56:54 crc kubenswrapper[4852]: I0129 11:56:54.181860 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-pc5nv" event={"ID":"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c","Type":"ContainerStarted","Data":"0cfe55bff04ccd6f0501481ea70b2f62dea703777c5c642be47d2d28a872c4ae"} Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.507437 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.681890 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-node-mnt\") pod \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.682049 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" (UID: "be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.682172 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lcrg\" (UniqueName: \"kubernetes.io/projected/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-kube-api-access-8lcrg\") pod \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.682441 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-crc-storage\") pod \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\" (UID: \"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c\") " Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.683140 4852 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.687344 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-kube-api-access-8lcrg" (OuterVolumeSpecName: "kube-api-access-8lcrg") pod "be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" (UID: "be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c"). InnerVolumeSpecName "kube-api-access-8lcrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.707222 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" (UID: "be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.784753 4852 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 29 11:56:55 crc kubenswrapper[4852]: I0129 11:56:55.784809 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lcrg\" (UniqueName: \"kubernetes.io/projected/be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c-kube-api-access-8lcrg\") on node \"crc\" DevicePath \"\"" Jan 29 11:56:56 crc kubenswrapper[4852]: I0129 11:56:56.216913 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-pc5nv" event={"ID":"be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c","Type":"ContainerDied","Data":"0cfe55bff04ccd6f0501481ea70b2f62dea703777c5c642be47d2d28a872c4ae"} Jan 29 11:56:56 crc kubenswrapper[4852]: I0129 11:56:56.217317 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cfe55bff04ccd6f0501481ea70b2f62dea703777c5c642be47d2d28a872c4ae" Jan 29 11:56:56 crc kubenswrapper[4852]: I0129 11:56:56.216955 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-pc5nv" Jan 29 11:56:58 crc kubenswrapper[4852]: I0129 11:56:58.464296 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:56:58 crc kubenswrapper[4852]: E0129 11:56:58.465262 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:57:12 crc kubenswrapper[4852]: I0129 11:57:12.463903 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:57:12 crc kubenswrapper[4852]: E0129 11:57:12.464847 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:57:26 crc kubenswrapper[4852]: I0129 11:57:26.464041 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:57:26 crc kubenswrapper[4852]: E0129 11:57:26.465311 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:57:41 crc kubenswrapper[4852]: I0129 11:57:41.463735 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:57:41 crc kubenswrapper[4852]: E0129 11:57:41.464460 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:57:56 crc kubenswrapper[4852]: I0129 11:57:56.464846 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:57:56 crc kubenswrapper[4852]: E0129 11:57:56.465610 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:58:10 crc kubenswrapper[4852]: I0129 11:58:10.463693 4852 scope.go:117] "RemoveContainer" 
containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:58:10 crc kubenswrapper[4852]: E0129 11:58:10.464356 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:58:22 crc kubenswrapper[4852]: I0129 11:58:22.464055 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:58:22 crc kubenswrapper[4852]: E0129 11:58:22.465635 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 11:58:34 crc kubenswrapper[4852]: I0129 11:58:34.464095 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 11:58:35 crc kubenswrapper[4852]: I0129 11:58:34.999704 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"9e329bd6a97de0fde26c46a7e6288174841eeafeb381c1cb751e172021dc8492"} Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.080234 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tbgl7"] Jan 29 11:59:29 crc kubenswrapper[4852]: E0129 11:59:29.081087 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" containerName="storage" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.081101 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" containerName="storage" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.081258 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="be1574b4-cb0e-4c4c-ac7e-90c6e5955e4c" containerName="storage" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.082421 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.102701 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tbgl7"] Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.114943 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rc96j\" (UniqueName: \"kubernetes.io/projected/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-kube-api-access-rc96j\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.115271 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-utilities\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.115341 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-catalog-content\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.216869 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-utilities\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.216919 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-catalog-content\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.217011 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rc96j\" (UniqueName: \"kubernetes.io/projected/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-kube-api-access-rc96j\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.217524 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-utilities\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.217571 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-catalog-content\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.235870 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rc96j\" (UniqueName: \"kubernetes.io/projected/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-kube-api-access-rc96j\") pod \"redhat-marketplace-tbgl7\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.280059 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h4z7p"] Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.281762 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.293246 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h4z7p"] Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.318032 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-utilities\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.318114 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89flg\" (UniqueName: \"kubernetes.io/projected/30b57117-042b-449f-87d5-6fc978a9d958-kube-api-access-89flg\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.318172 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-catalog-content\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.419195 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.419820 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-catalog-content\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.419904 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-utilities\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.419965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89flg\" (UniqueName: \"kubernetes.io/projected/30b57117-042b-449f-87d5-6fc978a9d958-kube-api-access-89flg\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.420367 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-catalog-content\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.420471 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-utilities\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.446261 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89flg\" (UniqueName: \"kubernetes.io/projected/30b57117-042b-449f-87d5-6fc978a9d958-kube-api-access-89flg\") pod \"redhat-operators-h4z7p\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.599385 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:29 crc kubenswrapper[4852]: I0129 11:59:29.902237 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tbgl7"] Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.067664 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h4z7p"] Jan 29 11:59:30 crc kubenswrapper[4852]: W0129 11:59:30.079083 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30b57117_042b_449f_87d5_6fc978a9d958.slice/crio-6ccb00abe19a4e1901317c7204e21563e6ff76a8bc00db07e4e26d24d9a0e6f1 WatchSource:0}: Error finding container 6ccb00abe19a4e1901317c7204e21563e6ff76a8bc00db07e4e26d24d9a0e6f1: Status 404 returned error can't find the container with id 6ccb00abe19a4e1901317c7204e21563e6ff76a8bc00db07e4e26d24d9a0e6f1 Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.453175 4852 generic.go:334] "Generic (PLEG): container finished" podID="30b57117-042b-449f-87d5-6fc978a9d958" containerID="bde11a730e7cf989f19b61e7f769a08e92b00e2864bc8eed471c435c5ebf4cf4" exitCode=0 Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.453230 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4z7p" event={"ID":"30b57117-042b-449f-87d5-6fc978a9d958","Type":"ContainerDied","Data":"bde11a730e7cf989f19b61e7f769a08e92b00e2864bc8eed471c435c5ebf4cf4"} Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.453280 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4z7p" event={"ID":"30b57117-042b-449f-87d5-6fc978a9d958","Type":"ContainerStarted","Data":"6ccb00abe19a4e1901317c7204e21563e6ff76a8bc00db07e4e26d24d9a0e6f1"} Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.454748 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.455336 4852 generic.go:334] "Generic (PLEG): container finished" podID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerID="9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831" exitCode=0 Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.455375 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tbgl7" event={"ID":"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1","Type":"ContainerDied","Data":"9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831"} Jan 29 11:59:30 crc kubenswrapper[4852]: I0129 11:59:30.455403 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tbgl7" event={"ID":"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1","Type":"ContainerStarted","Data":"3ffa80291e92837b5f908a2f166c080868dff77adf6f3c69b33007e6d94838ef"} Jan 29 11:59:32 crc kubenswrapper[4852]: I0129 11:59:32.471150 4852 generic.go:334] "Generic (PLEG): container finished" podID="30b57117-042b-449f-87d5-6fc978a9d958" containerID="08e013c97b023474e20778498dfb58970a5797229068b153ddfeb04fe6c3c9fa" exitCode=0 Jan 29 11:59:32 crc kubenswrapper[4852]: I0129 11:59:32.471239 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4z7p" event={"ID":"30b57117-042b-449f-87d5-6fc978a9d958","Type":"ContainerDied","Data":"08e013c97b023474e20778498dfb58970a5797229068b153ddfeb04fe6c3c9fa"} Jan 29 11:59:32 crc kubenswrapper[4852]: I0129 11:59:32.473761 
4852 generic.go:334] "Generic (PLEG): container finished" podID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerID="c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805" exitCode=0 Jan 29 11:59:32 crc kubenswrapper[4852]: I0129 11:59:32.473795 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tbgl7" event={"ID":"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1","Type":"ContainerDied","Data":"c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805"} Jan 29 11:59:33 crc kubenswrapper[4852]: I0129 11:59:33.484666 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4z7p" event={"ID":"30b57117-042b-449f-87d5-6fc978a9d958","Type":"ContainerStarted","Data":"eb004bc1453f87ffa45abd6364ca13b6fb4675943dc466c5766d996666436dde"} Jan 29 11:59:33 crc kubenswrapper[4852]: I0129 11:59:33.488460 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tbgl7" event={"ID":"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1","Type":"ContainerStarted","Data":"d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e"} Jan 29 11:59:34 crc kubenswrapper[4852]: I0129 11:59:34.513265 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tbgl7" podStartSLOduration=2.735741011 podStartE2EDuration="5.513242972s" podCreationTimestamp="2026-01-29 11:59:29 +0000 UTC" firstStartedPulling="2026-01-29 11:59:30.456709901 +0000 UTC m=+4667.674041025" lastFinishedPulling="2026-01-29 11:59:33.234211852 +0000 UTC m=+4670.451542986" observedRunningTime="2026-01-29 11:59:34.511313675 +0000 UTC m=+4671.728644809" watchObservedRunningTime="2026-01-29 11:59:34.513242972 +0000 UTC m=+4671.730574106" Jan 29 11:59:34 crc kubenswrapper[4852]: I0129 11:59:34.517298 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h4z7p" podStartSLOduration=2.866650763 podStartE2EDuration="5.51726583s" podCreationTimestamp="2026-01-29 11:59:29 +0000 UTC" firstStartedPulling="2026-01-29 11:59:30.454488676 +0000 UTC m=+4667.671819810" lastFinishedPulling="2026-01-29 11:59:33.105103743 +0000 UTC m=+4670.322434877" observedRunningTime="2026-01-29 11:59:33.516155777 +0000 UTC m=+4670.733486921" watchObservedRunningTime="2026-01-29 11:59:34.51726583 +0000 UTC m=+4671.734596994" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.419450 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.419840 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.500641 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.587556 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.600481 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.600535 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 
29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.638097 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:39 crc kubenswrapper[4852]: I0129 11:59:39.732151 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tbgl7"] Jan 29 11:59:40 crc kubenswrapper[4852]: I0129 11:59:40.602255 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:41 crc kubenswrapper[4852]: I0129 11:59:41.546947 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tbgl7" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="registry-server" containerID="cri-o://d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e" gracePeriod=2 Jan 29 11:59:41 crc kubenswrapper[4852]: I0129 11:59:41.939532 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h4z7p"] Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.011727 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.102328 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-utilities\") pod \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.102471 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rc96j\" (UniqueName: \"kubernetes.io/projected/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-kube-api-access-rc96j\") pod \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.102496 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-catalog-content\") pod \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\" (UID: \"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1\") " Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.103276 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-utilities" (OuterVolumeSpecName: "utilities") pod "0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" (UID: "0fe5669b-13c8-48a6-bd20-8eeb4eb443d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.103858 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.129838 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-kube-api-access-rc96j" (OuterVolumeSpecName: "kube-api-access-rc96j") pod "0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" (UID: "0fe5669b-13c8-48a6-bd20-8eeb4eb443d1"). InnerVolumeSpecName "kube-api-access-rc96j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.205340 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rc96j\" (UniqueName: \"kubernetes.io/projected/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-kube-api-access-rc96j\") on node \"crc\" DevicePath \"\"" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.559100 4852 generic.go:334] "Generic (PLEG): container finished" podID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerID="d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e" exitCode=0 Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.559173 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tbgl7" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.559215 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tbgl7" event={"ID":"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1","Type":"ContainerDied","Data":"d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e"} Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.559243 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tbgl7" event={"ID":"0fe5669b-13c8-48a6-bd20-8eeb4eb443d1","Type":"ContainerDied","Data":"3ffa80291e92837b5f908a2f166c080868dff77adf6f3c69b33007e6d94838ef"} Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.559262 4852 scope.go:117] "RemoveContainer" containerID="d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.559656 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h4z7p" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="registry-server" containerID="cri-o://eb004bc1453f87ffa45abd6364ca13b6fb4675943dc466c5766d996666436dde" gracePeriod=2 Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.592217 4852 scope.go:117] "RemoveContainer" containerID="c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.613995 4852 scope.go:117] "RemoveContainer" containerID="9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.645778 4852 scope.go:117] "RemoveContainer" containerID="d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e" Jan 29 11:59:42 crc kubenswrapper[4852]: E0129 11:59:42.646357 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e\": container with ID starting with d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e not found: ID does not exist" containerID="d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.646400 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e"} err="failed to get container status \"d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e\": rpc error: code = NotFound desc = could not find container \"d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e\": container with ID starting with d489e558ed4cd4a42f841143f93a5fdbb0350b71b781e0929ba640d17c26d83e not found: 
ID does not exist" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.646426 4852 scope.go:117] "RemoveContainer" containerID="c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805" Jan 29 11:59:42 crc kubenswrapper[4852]: E0129 11:59:42.647656 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805\": container with ID starting with c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805 not found: ID does not exist" containerID="c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.647720 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805"} err="failed to get container status \"c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805\": rpc error: code = NotFound desc = could not find container \"c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805\": container with ID starting with c558903d1b2d01ea43792700a64c7dadfed5eeaaddf47debf28c4531234d8805 not found: ID does not exist" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.647767 4852 scope.go:117] "RemoveContainer" containerID="9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831" Jan 29 11:59:42 crc kubenswrapper[4852]: E0129 11:59:42.648069 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831\": container with ID starting with 9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831 not found: ID does not exist" containerID="9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831" Jan 29 11:59:42 crc kubenswrapper[4852]: I0129 11:59:42.648097 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831"} err="failed to get container status \"9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831\": rpc error: code = NotFound desc = could not find container \"9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831\": container with ID starting with 9d89405522ea52cf9ef8a1c306757e1d8801052b9cfd7003efe369e898581831 not found: ID does not exist" Jan 29 11:59:43 crc kubenswrapper[4852]: I0129 11:59:43.092991 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" (UID: "0fe5669b-13c8-48a6-bd20-8eeb4eb443d1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:59:43 crc kubenswrapper[4852]: I0129 11:59:43.118548 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:59:43 crc kubenswrapper[4852]: I0129 11:59:43.211629 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tbgl7"] Jan 29 11:59:43 crc kubenswrapper[4852]: I0129 11:59:43.219086 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tbgl7"] Jan 29 11:59:43 crc kubenswrapper[4852]: I0129 11:59:43.475000 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" path="/var/lib/kubelet/pods/0fe5669b-13c8-48a6-bd20-8eeb4eb443d1/volumes" Jan 29 11:59:45 crc kubenswrapper[4852]: I0129 11:59:45.592603 4852 generic.go:334] "Generic (PLEG): container finished" podID="30b57117-042b-449f-87d5-6fc978a9d958" containerID="eb004bc1453f87ffa45abd6364ca13b6fb4675943dc466c5766d996666436dde" exitCode=0 Jan 29 11:59:45 crc kubenswrapper[4852]: I0129 11:59:45.592725 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4z7p" event={"ID":"30b57117-042b-449f-87d5-6fc978a9d958","Type":"ContainerDied","Data":"eb004bc1453f87ffa45abd6364ca13b6fb4675943dc466c5766d996666436dde"} Jan 29 11:59:45 crc kubenswrapper[4852]: I0129 11:59:45.988903 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.164144 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-utilities\") pod \"30b57117-042b-449f-87d5-6fc978a9d958\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.164813 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-catalog-content\") pod \"30b57117-042b-449f-87d5-6fc978a9d958\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.164945 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89flg\" (UniqueName: \"kubernetes.io/projected/30b57117-042b-449f-87d5-6fc978a9d958-kube-api-access-89flg\") pod \"30b57117-042b-449f-87d5-6fc978a9d958\" (UID: \"30b57117-042b-449f-87d5-6fc978a9d958\") " Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.165602 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-utilities" (OuterVolumeSpecName: "utilities") pod "30b57117-042b-449f-87d5-6fc978a9d958" (UID: "30b57117-042b-449f-87d5-6fc978a9d958"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.170518 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30b57117-042b-449f-87d5-6fc978a9d958-kube-api-access-89flg" (OuterVolumeSpecName: "kube-api-access-89flg") pod "30b57117-042b-449f-87d5-6fc978a9d958" (UID: "30b57117-042b-449f-87d5-6fc978a9d958"). 
InnerVolumeSpecName "kube-api-access-89flg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.266368 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.266410 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89flg\" (UniqueName: \"kubernetes.io/projected/30b57117-042b-449f-87d5-6fc978a9d958-kube-api-access-89flg\") on node \"crc\" DevicePath \"\"" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.317766 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "30b57117-042b-449f-87d5-6fc978a9d958" (UID: "30b57117-042b-449f-87d5-6fc978a9d958"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.367607 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b57117-042b-449f-87d5-6fc978a9d958-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.602010 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h4z7p" event={"ID":"30b57117-042b-449f-87d5-6fc978a9d958","Type":"ContainerDied","Data":"6ccb00abe19a4e1901317c7204e21563e6ff76a8bc00db07e4e26d24d9a0e6f1"} Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.602077 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h4z7p" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.602085 4852 scope.go:117] "RemoveContainer" containerID="eb004bc1453f87ffa45abd6364ca13b6fb4675943dc466c5766d996666436dde" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.621839 4852 scope.go:117] "RemoveContainer" containerID="08e013c97b023474e20778498dfb58970a5797229068b153ddfeb04fe6c3c9fa" Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.643748 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h4z7p"] Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.649835 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h4z7p"] Jan 29 11:59:46 crc kubenswrapper[4852]: I0129 11:59:46.650784 4852 scope.go:117] "RemoveContainer" containerID="bde11a730e7cf989f19b61e7f769a08e92b00e2864bc8eed471c435c5ebf4cf4" Jan 29 11:59:47 crc kubenswrapper[4852]: I0129 11:59:47.473751 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30b57117-042b-449f-87d5-6fc978a9d958" path="/var/lib/kubelet/pods/30b57117-042b-449f-87d5-6fc978a9d958/volumes" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.171723 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj"] Jan 29 12:00:00 crc kubenswrapper[4852]: E0129 12:00:00.172760 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="registry-server" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.172779 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="registry-server" Jan 29 12:00:00 crc kubenswrapper[4852]: E0129 12:00:00.172796 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="registry-server" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.172805 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="registry-server" Jan 29 12:00:00 crc kubenswrapper[4852]: E0129 12:00:00.172818 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="extract-utilities" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.172828 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="extract-utilities" Jan 29 12:00:00 crc kubenswrapper[4852]: E0129 12:00:00.172849 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="extract-content" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.172859 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="extract-content" Jan 29 12:00:00 crc kubenswrapper[4852]: E0129 12:00:00.172871 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="extract-content" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.172879 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="extract-content" Jan 29 12:00:00 crc kubenswrapper[4852]: E0129 12:00:00.172893 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="extract-utilities" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.172901 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="extract-utilities" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.173078 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="30b57117-042b-449f-87d5-6fc978a9d958" containerName="registry-server" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.173095 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fe5669b-13c8-48a6-bd20-8eeb4eb443d1" containerName="registry-server" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.173675 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.176370 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.176665 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.197498 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj"] Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.268656 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79f45844-2d28-463e-b303-22d2275003cd-config-volume\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.268973 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79f45844-2d28-463e-b303-22d2275003cd-secret-volume\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.269186 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnvnq\" (UniqueName: \"kubernetes.io/projected/79f45844-2d28-463e-b303-22d2275003cd-kube-api-access-mnvnq\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.370196 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79f45844-2d28-463e-b303-22d2275003cd-config-volume\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.370248 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79f45844-2d28-463e-b303-22d2275003cd-secret-volume\") pod \"collect-profiles-29494800-s2wbj\" (UID: 
\"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.370314 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnvnq\" (UniqueName: \"kubernetes.io/projected/79f45844-2d28-463e-b303-22d2275003cd-kube-api-access-mnvnq\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.371204 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79f45844-2d28-463e-b303-22d2275003cd-config-volume\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.377881 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79f45844-2d28-463e-b303-22d2275003cd-secret-volume\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.387816 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnvnq\" (UniqueName: \"kubernetes.io/projected/79f45844-2d28-463e-b303-22d2275003cd-kube-api-access-mnvnq\") pod \"collect-profiles-29494800-s2wbj\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.494138 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:00 crc kubenswrapper[4852]: I0129 12:00:00.728787 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj"] Jan 29 12:00:01 crc kubenswrapper[4852]: I0129 12:00:01.722402 4852 generic.go:334] "Generic (PLEG): container finished" podID="79f45844-2d28-463e-b303-22d2275003cd" containerID="7ea58d16a7baaf3e8d62a16c2880605cd00a97ad317fc41715decd16a7886f1e" exitCode=0 Jan 29 12:00:01 crc kubenswrapper[4852]: I0129 12:00:01.722511 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" event={"ID":"79f45844-2d28-463e-b303-22d2275003cd","Type":"ContainerDied","Data":"7ea58d16a7baaf3e8d62a16c2880605cd00a97ad317fc41715decd16a7886f1e"} Jan 29 12:00:01 crc kubenswrapper[4852]: I0129 12:00:01.722765 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" event={"ID":"79f45844-2d28-463e-b303-22d2275003cd","Type":"ContainerStarted","Data":"b1b4f6c8d88ecccc9607de5da641a47e209dade6c95df8cbddcefda7065c2184"} Jan 29 12:00:02 crc kubenswrapper[4852]: I0129 12:00:02.982750 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.009555 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnvnq\" (UniqueName: \"kubernetes.io/projected/79f45844-2d28-463e-b303-22d2275003cd-kube-api-access-mnvnq\") pod \"79f45844-2d28-463e-b303-22d2275003cd\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.009828 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79f45844-2d28-463e-b303-22d2275003cd-secret-volume\") pod \"79f45844-2d28-463e-b303-22d2275003cd\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.009875 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79f45844-2d28-463e-b303-22d2275003cd-config-volume\") pod \"79f45844-2d28-463e-b303-22d2275003cd\" (UID: \"79f45844-2d28-463e-b303-22d2275003cd\") " Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.010789 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79f45844-2d28-463e-b303-22d2275003cd-config-volume" (OuterVolumeSpecName: "config-volume") pod "79f45844-2d28-463e-b303-22d2275003cd" (UID: "79f45844-2d28-463e-b303-22d2275003cd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.026942 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79f45844-2d28-463e-b303-22d2275003cd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "79f45844-2d28-463e-b303-22d2275003cd" (UID: "79f45844-2d28-463e-b303-22d2275003cd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.027286 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79f45844-2d28-463e-b303-22d2275003cd-kube-api-access-mnvnq" (OuterVolumeSpecName: "kube-api-access-mnvnq") pod "79f45844-2d28-463e-b303-22d2275003cd" (UID: "79f45844-2d28-463e-b303-22d2275003cd"). InnerVolumeSpecName "kube-api-access-mnvnq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.111261 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnvnq\" (UniqueName: \"kubernetes.io/projected/79f45844-2d28-463e-b303-22d2275003cd-kube-api-access-mnvnq\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.111303 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79f45844-2d28-463e-b303-22d2275003cd-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.111320 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79f45844-2d28-463e-b303-22d2275003cd-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.739331 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" event={"ID":"79f45844-2d28-463e-b303-22d2275003cd","Type":"ContainerDied","Data":"b1b4f6c8d88ecccc9607de5da641a47e209dade6c95df8cbddcefda7065c2184"} Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.739392 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1b4f6c8d88ecccc9607de5da641a47e209dade6c95df8cbddcefda7065c2184" Jan 29 12:00:03 crc kubenswrapper[4852]: I0129 12:00:03.739477 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj" Jan 29 12:00:04 crc kubenswrapper[4852]: I0129 12:00:04.063402 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j"] Jan 29 12:00:04 crc kubenswrapper[4852]: I0129 12:00:04.070220 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494755-bc75j"] Jan 29 12:00:05 crc kubenswrapper[4852]: I0129 12:00:05.473432 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2cb8a99-9341-4d10-95a4-ac5ad8d3f583" path="/var/lib/kubelet/pods/c2cb8a99-9341-4d10-95a4-ac5ad8d3f583/volumes" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.741842 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-r5kvk"] Jan 29 12:00:09 crc kubenswrapper[4852]: E0129 12:00:09.742975 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f45844-2d28-463e-b303-22d2275003cd" containerName="collect-profiles" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.742989 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f45844-2d28-463e-b303-22d2275003cd" containerName="collect-profiles" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.743157 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="79f45844-2d28-463e-b303-22d2275003cd" containerName="collect-profiles" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.743906 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.746223 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.746396 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-nx425" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.746424 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.747138 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.747963 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.751392 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-r5kvk"] Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.907646 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f44kb\" (UniqueName: \"kubernetes.io/projected/113edc2d-a9a7-4550-8e9c-20beba0fcefc-kube-api-access-f44kb\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.907707 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-config\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:09 crc kubenswrapper[4852]: I0129 12:00:09.907778 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.008956 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f44kb\" (UniqueName: \"kubernetes.io/projected/113edc2d-a9a7-4550-8e9c-20beba0fcefc-kube-api-access-f44kb\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.009031 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-config\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.009120 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.010016 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.010237 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-6bhsf"] Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.011453 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-config\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.011535 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.034900 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f44kb\" (UniqueName: \"kubernetes.io/projected/113edc2d-a9a7-4550-8e9c-20beba0fcefc-kube-api-access-f44kb\") pod \"dnsmasq-dns-5d7b5456f5-r5kvk\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.056066 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-6bhsf"] Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.068250 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.110305 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-config\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.110394 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6r7j\" (UniqueName: \"kubernetes.io/projected/469ec5ee-8c2e-4207-8b6a-66770804daad-kube-api-access-b6r7j\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.110433 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.212268 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6r7j\" (UniqueName: \"kubernetes.io/projected/469ec5ee-8c2e-4207-8b6a-66770804daad-kube-api-access-b6r7j\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.212670 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: 
\"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.213036 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-config\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.213658 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.214744 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-config\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.241998 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6r7j\" (UniqueName: \"kubernetes.io/projected/469ec5ee-8c2e-4207-8b6a-66770804daad-kube-api-access-b6r7j\") pod \"dnsmasq-dns-98ddfc8f-6bhsf\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.326254 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.534316 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-r5kvk"] Jan 29 12:00:10 crc kubenswrapper[4852]: W0129 12:00:10.537769 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod113edc2d_a9a7_4550_8e9c_20beba0fcefc.slice/crio-21adffc26bae89cbef8014164959230ec7ed6b118898d344124baf9c4f58b5b9 WatchSource:0}: Error finding container 21adffc26bae89cbef8014164959230ec7ed6b118898d344124baf9c4f58b5b9: Status 404 returned error can't find the container with id 21adffc26bae89cbef8014164959230ec7ed6b118898d344124baf9c4f58b5b9 Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.737350 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-6bhsf"] Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.814475 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" event={"ID":"469ec5ee-8c2e-4207-8b6a-66770804daad","Type":"ContainerStarted","Data":"7089f7c54afbce7c064bae32e2460dba8f96872498d585bf362df7ab17ce39d1"} Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.824379 4852 generic.go:334] "Generic (PLEG): container finished" podID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerID="7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581" exitCode=0 Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.824419 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" event={"ID":"113edc2d-a9a7-4550-8e9c-20beba0fcefc","Type":"ContainerDied","Data":"7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581"} Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 
12:00:10.824443 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" event={"ID":"113edc2d-a9a7-4550-8e9c-20beba0fcefc","Type":"ContainerStarted","Data":"21adffc26bae89cbef8014164959230ec7ed6b118898d344124baf9c4f58b5b9"} Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.896800 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.911048 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.915288 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.915348 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.915411 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-n5knh" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.915560 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.915628 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 29 12:00:10 crc kubenswrapper[4852]: I0129 12:00:10.916843 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049323 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049379 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049414 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqg79\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-kube-api-access-gqg79\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049510 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211bc1f-7197-461e-b13d-9c99703f60be-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049593 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" 
Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049698 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049764 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049812 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.049871 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211bc1f-7197-461e-b13d-9c99703f60be-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.149549 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.150734 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151388 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151453 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211bc1f-7197-461e-b13d-9c99703f60be-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151524 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151562 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151620 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqg79\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-kube-api-access-gqg79\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151658 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211bc1f-7197-461e-b13d-9c99703f60be-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151704 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151740 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.151778 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.152201 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.153056 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.153300 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.153716 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.154323 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.154471 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.154517 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.154702 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-plkcp" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.155350 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.155373 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30da8a8bfa9339d0264e2d05f3a1665b31dc61f924efe0cc5afdc39dde19010c/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.156089 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211bc1f-7197-461e-b13d-9c99703f60be-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.156227 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211bc1f-7197-461e-b13d-9c99703f60be-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.166275 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.166393 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.169992 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqg79\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-kube-api-access-gqg79\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.185083 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.193861 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.230956 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.252874 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hdgp\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-kube-api-access-5hdgp\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.252937 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98003af5-418f-4e3c-b4e5-0a90e48f10d9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.252953 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.252970 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98003af5-418f-4e3c-b4e5-0a90e48f10d9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.252993 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.253017 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.253049 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.253074 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.253106 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354489 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354775 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hdgp\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-kube-api-access-5hdgp\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354812 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98003af5-418f-4e3c-b4e5-0a90e48f10d9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354830 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354847 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98003af5-418f-4e3c-b4e5-0a90e48f10d9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354872 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354892 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354921 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.354940 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.356301 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.356890 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.357309 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.357418 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.362088 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98003af5-418f-4e3c-b4e5-0a90e48f10d9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.362134 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98003af5-418f-4e3c-b4e5-0a90e48f10d9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.362510 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.363055 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.363080 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e231b3f56fa96d6ea9e653af8e921b02b4044cc2e09826ed8cc75d3cb14be7c5/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.371890 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hdgp\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-kube-api-access-5hdgp\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.395853 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.519688 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.662128 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.834147 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c211bc1f-7197-461e-b13d-9c99703f60be","Type":"ContainerStarted","Data":"c406c1960c653318a7949550bc1387986cba8746dd2d2a8001103e4cc81f5947"} Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.836817 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" event={"ID":"113edc2d-a9a7-4550-8e9c-20beba0fcefc","Type":"ContainerStarted","Data":"e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4"} Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.837146 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.838524 4852 generic.go:334] "Generic (PLEG): container finished" podID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerID="1889b1b701097d28bebeb32a1b8f5ebfb7c63379d9c1f6f9667a9ec310ded326" exitCode=0 Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.838578 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" event={"ID":"469ec5ee-8c2e-4207-8b6a-66770804daad","Type":"ContainerDied","Data":"1889b1b701097d28bebeb32a1b8f5ebfb7c63379d9c1f6f9667a9ec310ded326"} Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.860618 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" podStartSLOduration=2.860574852 podStartE2EDuration="2.860574852s" podCreationTimestamp="2026-01-29 12:00:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:11.859936567 +0000 UTC 
m=+4709.077267711" watchObservedRunningTime="2026-01-29 12:00:11.860574852 +0000 UTC m=+4709.077906006" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.921417 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.924099 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.928282 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-qbg8f" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.928659 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.929424 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.929621 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.934420 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.936833 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 29 12:00:11 crc kubenswrapper[4852]: I0129 12:00:11.992999 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.063653 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc3176db-9095-425e-ad2b-0fdcf60c6665-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064011 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3176db-9095-425e-ad2b-0fdcf60c6665-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064035 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064058 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-operator-scripts\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064080 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc3176db-9095-425e-ad2b-0fdcf60c6665-config-data-generated\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") 
" pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064111 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kswcf\" (UniqueName: \"kubernetes.io/projected/cc3176db-9095-425e-ad2b-0fdcf60c6665-kube-api-access-kswcf\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064130 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-kolla-config\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.064146 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-config-data-default\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.165991 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.166043 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-operator-scripts\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.166066 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc3176db-9095-425e-ad2b-0fdcf60c6665-config-data-generated\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.166099 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kswcf\" (UniqueName: \"kubernetes.io/projected/cc3176db-9095-425e-ad2b-0fdcf60c6665-kube-api-access-kswcf\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.166115 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-kolla-config\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.166132 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-config-data-default\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc 
kubenswrapper[4852]: I0129 12:00:12.166190 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc3176db-9095-425e-ad2b-0fdcf60c6665-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.166223 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3176db-9095-425e-ad2b-0fdcf60c6665-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.167209 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc3176db-9095-425e-ad2b-0fdcf60c6665-config-data-generated\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.167429 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-kolla-config\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.167916 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-config-data-default\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.168904 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc3176db-9095-425e-ad2b-0fdcf60c6665-operator-scripts\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.171511 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.171550 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/da17d506e20b138b6186e4b1afac8e5d39b9deb6ac48af559de7517b73f18385/globalmount\"" pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.279262 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.280694 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.283322 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.283617 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-nrjnp" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.291116 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.375277 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-config-data\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.375371 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7z5b\" (UniqueName: \"kubernetes.io/projected/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-kube-api-access-r7z5b\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.375502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-kolla-config\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.462636 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc3176db-9095-425e-ad2b-0fdcf60c6665-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.462808 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kswcf\" (UniqueName: \"kubernetes.io/projected/cc3176db-9095-425e-ad2b-0fdcf60c6665-kube-api-access-kswcf\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.462931 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3176db-9095-425e-ad2b-0fdcf60c6665-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.477211 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-config-data\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.477280 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7z5b\" (UniqueName: \"kubernetes.io/projected/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-kube-api-access-r7z5b\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 
12:00:12.477367 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-kolla-config\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.478184 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-config-data\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.478200 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-kolla-config\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.495021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7z5b\" (UniqueName: \"kubernetes.io/projected/eb71bdfc-e30e-4843-bc11-ed203c2f82f0-kube-api-access-r7z5b\") pod \"memcached-0\" (UID: \"eb71bdfc-e30e-4843-bc11-ed203c2f82f0\") " pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.577494 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-06e3f64e-aa74-49b4-a827-5647565cd8a1\") pod \"openstack-galera-0\" (UID: \"cc3176db-9095-425e-ad2b-0fdcf60c6665\") " pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.667263 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.847799 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"98003af5-418f-4e3c-b4e5-0a90e48f10d9","Type":"ContainerStarted","Data":"123711c4afed5b7d4be4e8a9ec131bdd2ac99a6bb7c7bb7592fd9ff1f2722cac"} Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.850162 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" event={"ID":"469ec5ee-8c2e-4207-8b6a-66770804daad","Type":"ContainerStarted","Data":"c0c6fda2635a61814bf133c325573ecfe45d1210622d8b0c9bb257ca4f594621"} Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.861992 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 12:00:12 crc kubenswrapper[4852]: I0129 12:00:12.892062 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" podStartSLOduration=3.892038936 podStartE2EDuration="3.892038936s" podCreationTimestamp="2026-01-29 12:00:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:12.865533319 +0000 UTC m=+4710.082864453" watchObservedRunningTime="2026-01-29 12:00:12.892038936 +0000 UTC m=+4710.109370070" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.084922 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 12:00:13 crc kubenswrapper[4852]: W0129 12:00:13.090929 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb71bdfc_e30e_4843_bc11_ed203c2f82f0.slice/crio-e641780947be2d8411453a13511aef8c383083d9f3b09f2d69559409efbeb205 WatchSource:0}: Error finding container e641780947be2d8411453a13511aef8c383083d9f3b09f2d69559409efbeb205: Status 404 returned error can't find the container with id e641780947be2d8411453a13511aef8c383083d9f3b09f2d69559409efbeb205 Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.216380 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 12:00:13 crc kubenswrapper[4852]: W0129 12:00:13.224287 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc3176db_9095_425e_ad2b_0fdcf60c6665.slice/crio-8a6ee2c9c56c2a46cf620cbf0ab0547f00c122fed8a3c61f2e94a3fd78128c34 WatchSource:0}: Error finding container 8a6ee2c9c56c2a46cf620cbf0ab0547f00c122fed8a3c61f2e94a3fd78128c34: Status 404 returned error can't find the container with id 8a6ee2c9c56c2a46cf620cbf0ab0547f00c122fed8a3c61f2e94a3fd78128c34 Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.366661 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.368038 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.371692 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.371766 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-9vpdm" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.372005 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.372107 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.376410 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.496935 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ae14cfa-d63c-4533-964e-4b87a973a38f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.496984 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d80257a3-f51a-479b-9283-851971a7c4bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d80257a3-f51a-479b-9283-851971a7c4bd\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.497004 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv5gv\" (UniqueName: \"kubernetes.io/projected/9ae14cfa-d63c-4533-964e-4b87a973a38f-kube-api-access-pv5gv\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.497253 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.497416 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.497467 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ae14cfa-d63c-4533-964e-4b87a973a38f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.497670 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.497731 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9ae14cfa-d63c-4533-964e-4b87a973a38f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599686 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599739 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ae14cfa-d63c-4533-964e-4b87a973a38f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599796 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599813 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9ae14cfa-d63c-4533-964e-4b87a973a38f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599844 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ae14cfa-d63c-4533-964e-4b87a973a38f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599875 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d80257a3-f51a-479b-9283-851971a7c4bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d80257a3-f51a-479b-9283-851971a7c4bd\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599894 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv5gv\" (UniqueName: \"kubernetes.io/projected/9ae14cfa-d63c-4533-964e-4b87a973a38f-kube-api-access-pv5gv\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.599937 4852 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.601204 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/9ae14cfa-d63c-4533-964e-4b87a973a38f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.601429 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.601837 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.602007 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae14cfa-d63c-4533-964e-4b87a973a38f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.603698 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.603727 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d80257a3-f51a-479b-9283-851971a7c4bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d80257a3-f51a-479b-9283-851971a7c4bd\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ea17d7b21e27bcb28f16e385f0e170e92daf008a7ab214ff7af489e724c2f0ea/globalmount\"" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.604042 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ae14cfa-d63c-4533-964e-4b87a973a38f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.605509 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ae14cfa-d63c-4533-964e-4b87a973a38f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.626171 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d80257a3-f51a-479b-9283-851971a7c4bd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d80257a3-f51a-479b-9283-851971a7c4bd\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.658266 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv5gv\" (UniqueName: \"kubernetes.io/projected/9ae14cfa-d63c-4533-964e-4b87a973a38f-kube-api-access-pv5gv\") pod \"openstack-cell1-galera-0\" (UID: \"9ae14cfa-d63c-4533-964e-4b87a973a38f\") " pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.828016 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.857251 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc3176db-9095-425e-ad2b-0fdcf60c6665","Type":"ContainerStarted","Data":"fe195a0faeb24223c374357c8a4408c65753c44ba11cc326dad0ee55add8bfed"} Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.857300 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc3176db-9095-425e-ad2b-0fdcf60c6665","Type":"ContainerStarted","Data":"8a6ee2c9c56c2a46cf620cbf0ab0547f00c122fed8a3c61f2e94a3fd78128c34"} Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.859532 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"98003af5-418f-4e3c-b4e5-0a90e48f10d9","Type":"ContainerStarted","Data":"bb537baeb31f50b94a7f823dc13d2712afe1fe98177e62ceb5904f51cc8de88a"} Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.861287 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"eb71bdfc-e30e-4843-bc11-ed203c2f82f0","Type":"ContainerStarted","Data":"f82ef515343fb0665cd94b82f1f2e0352fa98488b12e5090b5b8aeed87d4325f"} Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.861322 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"eb71bdfc-e30e-4843-bc11-ed203c2f82f0","Type":"ContainerStarted","Data":"e641780947be2d8411453a13511aef8c383083d9f3b09f2d69559409efbeb205"} Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.861648 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.863818 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c211bc1f-7197-461e-b13d-9c99703f60be","Type":"ContainerStarted","Data":"7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a"} Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.863943 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:13 crc kubenswrapper[4852]: I0129 12:00:13.931251 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.931232177 podStartE2EDuration="1.931232177s" podCreationTimestamp="2026-01-29 12:00:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:13.930568811 +0000 UTC m=+4711.147899945" watchObservedRunningTime="2026-01-29 12:00:13.931232177 +0000 UTC m=+4711.148563301" Jan 29 12:00:14 crc kubenswrapper[4852]: I0129 12:00:14.293947 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 12:00:14 crc kubenswrapper[4852]: W0129 12:00:14.297843 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ae14cfa_d63c_4533_964e_4b87a973a38f.slice/crio-8d2bae2d19f4ccdcc67e76c734e51ef585ab7bfbddfc2f9ac3f16aefdb3eeae5 WatchSource:0}: Error finding container 8d2bae2d19f4ccdcc67e76c734e51ef585ab7bfbddfc2f9ac3f16aefdb3eeae5: Status 404 returned error can't find the container with id 8d2bae2d19f4ccdcc67e76c734e51ef585ab7bfbddfc2f9ac3f16aefdb3eeae5 Jan 29 12:00:14 crc kubenswrapper[4852]: I0129 12:00:14.870695 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9ae14cfa-d63c-4533-964e-4b87a973a38f","Type":"ContainerStarted","Data":"858f8f688e428c9fc8eb698f5b45f4407a9de4c55210205a97faa26113740eaa"} Jan 29 12:00:14 crc kubenswrapper[4852]: I0129 12:00:14.871045 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9ae14cfa-d63c-4533-964e-4b87a973a38f","Type":"ContainerStarted","Data":"8d2bae2d19f4ccdcc67e76c734e51ef585ab7bfbddfc2f9ac3f16aefdb3eeae5"} Jan 29 12:00:17 crc kubenswrapper[4852]: I0129 12:00:17.899076 4852 generic.go:334] "Generic (PLEG): container finished" podID="cc3176db-9095-425e-ad2b-0fdcf60c6665" containerID="fe195a0faeb24223c374357c8a4408c65753c44ba11cc326dad0ee55add8bfed" exitCode=0 Jan 29 12:00:17 crc kubenswrapper[4852]: I0129 12:00:17.899109 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc3176db-9095-425e-ad2b-0fdcf60c6665","Type":"ContainerDied","Data":"fe195a0faeb24223c374357c8a4408c65753c44ba11cc326dad0ee55add8bfed"} Jan 29 12:00:18 crc kubenswrapper[4852]: I0129 12:00:18.917334 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"cc3176db-9095-425e-ad2b-0fdcf60c6665","Type":"ContainerStarted","Data":"3ac65aee9e6cba810f290ff5c6d16972693f231f3802c0e18d0ee76b1c486998"} Jan 29 12:00:18 crc kubenswrapper[4852]: I0129 12:00:18.920279 4852 generic.go:334] "Generic (PLEG): container finished" podID="9ae14cfa-d63c-4533-964e-4b87a973a38f" containerID="858f8f688e428c9fc8eb698f5b45f4407a9de4c55210205a97faa26113740eaa" exitCode=0 Jan 29 12:00:18 crc kubenswrapper[4852]: I0129 12:00:18.920315 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9ae14cfa-d63c-4533-964e-4b87a973a38f","Type":"ContainerDied","Data":"858f8f688e428c9fc8eb698f5b45f4407a9de4c55210205a97faa26113740eaa"} Jan 29 12:00:18 crc kubenswrapper[4852]: I0129 12:00:18.961739 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.961712148 podStartE2EDuration="8.961712148s" podCreationTimestamp="2026-01-29 12:00:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:18.953642001 +0000 UTC m=+4716.170973145" watchObservedRunningTime="2026-01-29 12:00:18.961712148 +0000 UTC m=+4716.179043282" Jan 29 12:00:19 crc kubenswrapper[4852]: I0129 12:00:19.930152 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"9ae14cfa-d63c-4533-964e-4b87a973a38f","Type":"ContainerStarted","Data":"48db290a62fc3027c702b21c4f17ce91747319b8db440469e89bf1c1b7c60831"} Jan 29 12:00:19 crc kubenswrapper[4852]: I0129 12:00:19.963310 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.963292882 podStartE2EDuration="7.963292882s" podCreationTimestamp="2026-01-29 12:00:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:19.963109608 +0000 UTC m=+4717.180440782" watchObservedRunningTime="2026-01-29 12:00:19.963292882 +0000 UTC m=+4717.180624016" Jan 29 12:00:20 crc kubenswrapper[4852]: I0129 12:00:20.070988 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:20 crc kubenswrapper[4852]: I0129 12:00:20.328849 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:00:20 crc kubenswrapper[4852]: I0129 12:00:20.388811 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-r5kvk"] Jan 29 12:00:20 crc kubenswrapper[4852]: I0129 12:00:20.936698 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerName="dnsmasq-dns" containerID="cri-o://e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4" gracePeriod=10 Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.417003 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.538643 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-config\") pod \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.538720 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-dns-svc\") pod \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.538770 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f44kb\" (UniqueName: \"kubernetes.io/projected/113edc2d-a9a7-4550-8e9c-20beba0fcefc-kube-api-access-f44kb\") pod \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\" (UID: \"113edc2d-a9a7-4550-8e9c-20beba0fcefc\") " Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.544158 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/113edc2d-a9a7-4550-8e9c-20beba0fcefc-kube-api-access-f44kb" (OuterVolumeSpecName: "kube-api-access-f44kb") pod "113edc2d-a9a7-4550-8e9c-20beba0fcefc" (UID: "113edc2d-a9a7-4550-8e9c-20beba0fcefc"). InnerVolumeSpecName "kube-api-access-f44kb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.574932 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-config" (OuterVolumeSpecName: "config") pod "113edc2d-a9a7-4550-8e9c-20beba0fcefc" (UID: "113edc2d-a9a7-4550-8e9c-20beba0fcefc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.591896 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "113edc2d-a9a7-4550-8e9c-20beba0fcefc" (UID: "113edc2d-a9a7-4550-8e9c-20beba0fcefc"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.641008 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.641382 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/113edc2d-a9a7-4550-8e9c-20beba0fcefc-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.641462 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f44kb\" (UniqueName: \"kubernetes.io/projected/113edc2d-a9a7-4550-8e9c-20beba0fcefc-kube-api-access-f44kb\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.947947 4852 generic.go:334] "Generic (PLEG): container finished" podID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerID="e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4" exitCode=0 Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.947997 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.948001 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" event={"ID":"113edc2d-a9a7-4550-8e9c-20beba0fcefc","Type":"ContainerDied","Data":"e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4"} Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.948119 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-r5kvk" event={"ID":"113edc2d-a9a7-4550-8e9c-20beba0fcefc","Type":"ContainerDied","Data":"21adffc26bae89cbef8014164959230ec7ed6b118898d344124baf9c4f58b5b9"} Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.948142 4852 scope.go:117] "RemoveContainer" containerID="e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.969538 4852 scope.go:117] "RemoveContainer" containerID="7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.980924 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-r5kvk"] Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.986498 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-r5kvk"] Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.997007 4852 scope.go:117] "RemoveContainer" containerID="e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4" Jan 29 12:00:21 crc kubenswrapper[4852]: E0129 12:00:21.998517 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4\": container with ID starting with e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4 not found: ID does not exist" containerID="e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.998573 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4"} err="failed to get container status 
\"e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4\": rpc error: code = NotFound desc = could not find container \"e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4\": container with ID starting with e93f945960c641001d19daccba480a9b389f23d99471307260993d67b2c739a4 not found: ID does not exist" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.998615 4852 scope.go:117] "RemoveContainer" containerID="7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581" Jan 29 12:00:21 crc kubenswrapper[4852]: E0129 12:00:21.999238 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581\": container with ID starting with 7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581 not found: ID does not exist" containerID="7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581" Jan 29 12:00:21 crc kubenswrapper[4852]: I0129 12:00:21.999281 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581"} err="failed to get container status \"7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581\": rpc error: code = NotFound desc = could not find container \"7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581\": container with ID starting with 7e4dd567282cee087dd16302c67a4a495c9cfa3ca3d25cce433e44cedbf36581 not found: ID does not exist" Jan 29 12:00:22 crc kubenswrapper[4852]: I0129 12:00:22.669349 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 29 12:00:22 crc kubenswrapper[4852]: I0129 12:00:22.862684 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 29 12:00:22 crc kubenswrapper[4852]: I0129 12:00:22.862748 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 29 12:00:23 crc kubenswrapper[4852]: I0129 12:00:23.171269 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 29 12:00:23 crc kubenswrapper[4852]: I0129 12:00:23.246162 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 29 12:00:23 crc kubenswrapper[4852]: I0129 12:00:23.487797 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" path="/var/lib/kubelet/pods/113edc2d-a9a7-4550-8e9c-20beba0fcefc/volumes" Jan 29 12:00:23 crc kubenswrapper[4852]: I0129 12:00:23.828937 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:23 crc kubenswrapper[4852]: I0129 12:00:23.829237 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:26 crc kubenswrapper[4852]: I0129 12:00:26.196524 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:26 crc kubenswrapper[4852]: I0129 12:00:26.286309 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.867744 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kmqh8"] Jan 29 12:00:30 
crc kubenswrapper[4852]: E0129 12:00:30.868620 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerName="init" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.868645 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerName="init" Jan 29 12:00:30 crc kubenswrapper[4852]: E0129 12:00:30.868684 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerName="dnsmasq-dns" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.868697 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerName="dnsmasq-dns" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.868965 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="113edc2d-a9a7-4550-8e9c-20beba0fcefc" containerName="dnsmasq-dns" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.869833 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.872000 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.886615 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kmqh8"] Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.995645 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-operator-scripts\") pod \"root-account-create-update-kmqh8\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:30 crc kubenswrapper[4852]: I0129 12:00:30.995718 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8r4m\" (UniqueName: \"kubernetes.io/projected/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-kube-api-access-m8r4m\") pod \"root-account-create-update-kmqh8\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:31 crc kubenswrapper[4852]: I0129 12:00:31.096960 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8r4m\" (UniqueName: \"kubernetes.io/projected/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-kube-api-access-m8r4m\") pod \"root-account-create-update-kmqh8\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:31 crc kubenswrapper[4852]: I0129 12:00:31.097108 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-operator-scripts\") pod \"root-account-create-update-kmqh8\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:31 crc kubenswrapper[4852]: I0129 12:00:31.097971 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-operator-scripts\") pod \"root-account-create-update-kmqh8\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:31 crc 
kubenswrapper[4852]: I0129 12:00:31.122477 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8r4m\" (UniqueName: \"kubernetes.io/projected/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-kube-api-access-m8r4m\") pod \"root-account-create-update-kmqh8\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:31 crc kubenswrapper[4852]: I0129 12:00:31.204209 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:31 crc kubenswrapper[4852]: I0129 12:00:31.626127 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kmqh8"] Jan 29 12:00:31 crc kubenswrapper[4852]: W0129 12:00:31.629814 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c5f2d07_aa3f_439e_b0d4_ccca70a26ebc.slice/crio-4e6e49509d8e136cf141b22c2833573a3d72babf9cf394ca9664be45badc3e59 WatchSource:0}: Error finding container 4e6e49509d8e136cf141b22c2833573a3d72babf9cf394ca9664be45badc3e59: Status 404 returned error can't find the container with id 4e6e49509d8e136cf141b22c2833573a3d72babf9cf394ca9664be45badc3e59 Jan 29 12:00:32 crc kubenswrapper[4852]: I0129 12:00:32.021321 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmqh8" event={"ID":"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc","Type":"ContainerStarted","Data":"c17b4aa02b83e31f0b8b187f47a1c97f7f1d39964d1364cf1e0a05f2845e3e4f"} Jan 29 12:00:32 crc kubenswrapper[4852]: I0129 12:00:32.021672 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmqh8" event={"ID":"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc","Type":"ContainerStarted","Data":"4e6e49509d8e136cf141b22c2833573a3d72babf9cf394ca9664be45badc3e59"} Jan 29 12:00:32 crc kubenswrapper[4852]: I0129 12:00:32.039548 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-kmqh8" podStartSLOduration=2.039526917 podStartE2EDuration="2.039526917s" podCreationTimestamp="2026-01-29 12:00:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:32.036888682 +0000 UTC m=+4729.254219826" watchObservedRunningTime="2026-01-29 12:00:32.039526917 +0000 UTC m=+4729.256858061" Jan 29 12:00:33 crc kubenswrapper[4852]: I0129 12:00:33.030881 4852 generic.go:334] "Generic (PLEG): container finished" podID="5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" containerID="c17b4aa02b83e31f0b8b187f47a1c97f7f1d39964d1364cf1e0a05f2845e3e4f" exitCode=0 Jan 29 12:00:33 crc kubenswrapper[4852]: I0129 12:00:33.030932 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmqh8" event={"ID":"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc","Type":"ContainerDied","Data":"c17b4aa02b83e31f0b8b187f47a1c97f7f1d39964d1364cf1e0a05f2845e3e4f"} Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.331959 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.449674 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-operator-scripts\") pod \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.449732 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8r4m\" (UniqueName: \"kubernetes.io/projected/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-kube-api-access-m8r4m\") pod \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\" (UID: \"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc\") " Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.450415 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" (UID: "5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.456534 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-kube-api-access-m8r4m" (OuterVolumeSpecName: "kube-api-access-m8r4m") pod "5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" (UID: "5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc"). InnerVolumeSpecName "kube-api-access-m8r4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.551620 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:34 crc kubenswrapper[4852]: I0129 12:00:34.551703 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8r4m\" (UniqueName: \"kubernetes.io/projected/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc-kube-api-access-m8r4m\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:35 crc kubenswrapper[4852]: I0129 12:00:35.051690 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmqh8" event={"ID":"5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc","Type":"ContainerDied","Data":"4e6e49509d8e136cf141b22c2833573a3d72babf9cf394ca9664be45badc3e59"} Jan 29 12:00:35 crc kubenswrapper[4852]: I0129 12:00:35.051989 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e6e49509d8e136cf141b22c2833573a3d72babf9cf394ca9664be45badc3e59" Jan 29 12:00:35 crc kubenswrapper[4852]: I0129 12:00:35.051744 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kmqh8" Jan 29 12:00:37 crc kubenswrapper[4852]: I0129 12:00:37.311922 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kmqh8"] Jan 29 12:00:37 crc kubenswrapper[4852]: I0129 12:00:37.317072 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kmqh8"] Jan 29 12:00:37 crc kubenswrapper[4852]: I0129 12:00:37.473062 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" path="/var/lib/kubelet/pods/5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc/volumes" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.323139 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zh6zb"] Jan 29 12:00:42 crc kubenswrapper[4852]: E0129 12:00:42.323920 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" containerName="mariadb-account-create-update" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.323936 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" containerName="mariadb-account-create-update" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.324120 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c5f2d07-aa3f-439e-b0d4-ccca70a26ebc" containerName="mariadb-account-create-update" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.324764 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.327657 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.332782 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zh6zb"] Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.382428 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nnh9\" (UniqueName: \"kubernetes.io/projected/779c21f7-b3c4-40ec-86df-ded55bf0c715-kube-api-access-6nnh9\") pod \"root-account-create-update-zh6zb\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.382542 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/779c21f7-b3c4-40ec-86df-ded55bf0c715-operator-scripts\") pod \"root-account-create-update-zh6zb\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.483754 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nnh9\" (UniqueName: \"kubernetes.io/projected/779c21f7-b3c4-40ec-86df-ded55bf0c715-kube-api-access-6nnh9\") pod \"root-account-create-update-zh6zb\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.483881 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/779c21f7-b3c4-40ec-86df-ded55bf0c715-operator-scripts\") pod 
\"root-account-create-update-zh6zb\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.484850 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/779c21f7-b3c4-40ec-86df-ded55bf0c715-operator-scripts\") pod \"root-account-create-update-zh6zb\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.505249 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nnh9\" (UniqueName: \"kubernetes.io/projected/779c21f7-b3c4-40ec-86df-ded55bf0c715-kube-api-access-6nnh9\") pod \"root-account-create-update-zh6zb\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:42 crc kubenswrapper[4852]: I0129 12:00:42.644532 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:43 crc kubenswrapper[4852]: I0129 12:00:43.141900 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zh6zb"] Jan 29 12:00:44 crc kubenswrapper[4852]: I0129 12:00:44.126984 4852 generic.go:334] "Generic (PLEG): container finished" podID="779c21f7-b3c4-40ec-86df-ded55bf0c715" containerID="d704e345f13663d19322c3c4143367395f13a59b72b2f8742889a76a50a329b1" exitCode=0 Jan 29 12:00:44 crc kubenswrapper[4852]: I0129 12:00:44.127082 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zh6zb" event={"ID":"779c21f7-b3c4-40ec-86df-ded55bf0c715","Type":"ContainerDied","Data":"d704e345f13663d19322c3c4143367395f13a59b72b2f8742889a76a50a329b1"} Jan 29 12:00:44 crc kubenswrapper[4852]: I0129 12:00:44.127321 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zh6zb" event={"ID":"779c21f7-b3c4-40ec-86df-ded55bf0c715","Type":"ContainerStarted","Data":"43663349321d4ffbfd3d7e5a6f6dabb45a57dbba4b4c2179cba1202598a48aaa"} Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.391277 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.429252 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nnh9\" (UniqueName: \"kubernetes.io/projected/779c21f7-b3c4-40ec-86df-ded55bf0c715-kube-api-access-6nnh9\") pod \"779c21f7-b3c4-40ec-86df-ded55bf0c715\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.429419 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/779c21f7-b3c4-40ec-86df-ded55bf0c715-operator-scripts\") pod \"779c21f7-b3c4-40ec-86df-ded55bf0c715\" (UID: \"779c21f7-b3c4-40ec-86df-ded55bf0c715\") " Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.430280 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/779c21f7-b3c4-40ec-86df-ded55bf0c715-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "779c21f7-b3c4-40ec-86df-ded55bf0c715" (UID: "779c21f7-b3c4-40ec-86df-ded55bf0c715"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.441910 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/779c21f7-b3c4-40ec-86df-ded55bf0c715-kube-api-access-6nnh9" (OuterVolumeSpecName: "kube-api-access-6nnh9") pod "779c21f7-b3c4-40ec-86df-ded55bf0c715" (UID: "779c21f7-b3c4-40ec-86df-ded55bf0c715"). InnerVolumeSpecName "kube-api-access-6nnh9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.530740 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/779c21f7-b3c4-40ec-86df-ded55bf0c715-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:45 crc kubenswrapper[4852]: I0129 12:00:45.530786 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nnh9\" (UniqueName: \"kubernetes.io/projected/779c21f7-b3c4-40ec-86df-ded55bf0c715-kube-api-access-6nnh9\") on node \"crc\" DevicePath \"\"" Jan 29 12:00:46 crc kubenswrapper[4852]: I0129 12:00:46.143078 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zh6zb" event={"ID":"779c21f7-b3c4-40ec-86df-ded55bf0c715","Type":"ContainerDied","Data":"43663349321d4ffbfd3d7e5a6f6dabb45a57dbba4b4c2179cba1202598a48aaa"} Jan 29 12:00:46 crc kubenswrapper[4852]: I0129 12:00:46.143133 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh6zb" Jan 29 12:00:46 crc kubenswrapper[4852]: I0129 12:00:46.143150 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43663349321d4ffbfd3d7e5a6f6dabb45a57dbba4b4c2179cba1202598a48aaa" Jan 29 12:00:46 crc kubenswrapper[4852]: I0129 12:00:46.145161 4852 generic.go:334] "Generic (PLEG): container finished" podID="c211bc1f-7197-461e-b13d-9c99703f60be" containerID="7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a" exitCode=0 Jan 29 12:00:46 crc kubenswrapper[4852]: I0129 12:00:46.145254 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c211bc1f-7197-461e-b13d-9c99703f60be","Type":"ContainerDied","Data":"7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a"} Jan 29 12:00:47 crc kubenswrapper[4852]: I0129 12:00:47.162911 4852 generic.go:334] "Generic (PLEG): container finished" podID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerID="bb537baeb31f50b94a7f823dc13d2712afe1fe98177e62ceb5904f51cc8de88a" exitCode=0 Jan 29 12:00:47 crc kubenswrapper[4852]: I0129 12:00:47.163405 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"98003af5-418f-4e3c-b4e5-0a90e48f10d9","Type":"ContainerDied","Data":"bb537baeb31f50b94a7f823dc13d2712afe1fe98177e62ceb5904f51cc8de88a"} Jan 29 12:00:47 crc kubenswrapper[4852]: I0129 12:00:47.167474 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c211bc1f-7197-461e-b13d-9c99703f60be","Type":"ContainerStarted","Data":"38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9"} Jan 29 12:00:47 crc kubenswrapper[4852]: I0129 12:00:47.167827 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 29 12:00:47 crc kubenswrapper[4852]: I0129 12:00:47.227624 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/rabbitmq-server-0" podStartSLOduration=38.227605049 podStartE2EDuration="38.227605049s" podCreationTimestamp="2026-01-29 12:00:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:47.225857966 +0000 UTC m=+4744.443189100" watchObservedRunningTime="2026-01-29 12:00:47.227605049 +0000 UTC m=+4744.444936183" Jan 29 12:00:48 crc kubenswrapper[4852]: I0129 12:00:48.178901 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"98003af5-418f-4e3c-b4e5-0a90e48f10d9","Type":"ContainerStarted","Data":"80e52ea2c8f40a3ae391d5cf89a86ba25158681687a65b7f124629e074834fb9"} Jan 29 12:00:48 crc kubenswrapper[4852]: I0129 12:00:48.179821 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:00:48 crc kubenswrapper[4852]: I0129 12:00:48.207328 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.207306188 podStartE2EDuration="38.207306188s" podCreationTimestamp="2026-01-29 12:00:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:00:48.198550125 +0000 UTC m=+4745.415881259" watchObservedRunningTime="2026-01-29 12:00:48.207306188 +0000 UTC m=+4745.424637322" Jan 29 12:00:48 crc kubenswrapper[4852]: I0129 12:00:48.987954 4852 scope.go:117] "RemoveContainer" containerID="3a7a74f2590a42f7cf2410880be4d9d5ba7bfe0cfe107145c5aee0153ecf9f71" Jan 29 12:01:00 crc kubenswrapper[4852]: I0129 12:01:00.017170 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:01:00 crc kubenswrapper[4852]: I0129 12:01:00.017709 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:01:01 crc kubenswrapper[4852]: I0129 12:01:01.234895 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 29 12:01:01 crc kubenswrapper[4852]: I0129 12:01:01.523061 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.792299 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-q7qjb"] Jan 29 12:01:05 crc kubenswrapper[4852]: E0129 12:01:05.793214 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="779c21f7-b3c4-40ec-86df-ded55bf0c715" containerName="mariadb-account-create-update" Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.793233 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="779c21f7-b3c4-40ec-86df-ded55bf0c715" containerName="mariadb-account-create-update" Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.793405 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="779c21f7-b3c4-40ec-86df-ded55bf0c715" containerName="mariadb-account-create-update" Jan 
29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.794397 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.812503 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-q7qjb"] Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.898677 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgb45\" (UniqueName: \"kubernetes.io/projected/84413885-86dc-4801-bb4f-e6a0e88875d0-kube-api-access-rgb45\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.899011 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-config\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:05 crc kubenswrapper[4852]: I0129 12:01:05.899115 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.000694 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgb45\" (UniqueName: \"kubernetes.io/projected/84413885-86dc-4801-bb4f-e6a0e88875d0-kube-api-access-rgb45\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.001032 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-config\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.001173 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.002021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-config\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.002021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.024253 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rgb45\" (UniqueName: \"kubernetes.io/projected/84413885-86dc-4801-bb4f-e6a0e88875d0-kube-api-access-rgb45\") pod \"dnsmasq-dns-5b7946d7b9-q7qjb\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.121452 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.363118 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-q7qjb"] Jan 29 12:01:06 crc kubenswrapper[4852]: I0129 12:01:06.490369 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:01:07 crc kubenswrapper[4852]: I0129 12:01:07.238418 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:01:07 crc kubenswrapper[4852]: I0129 12:01:07.333561 4852 generic.go:334] "Generic (PLEG): container finished" podID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerID="490a189ae9f93b5acb18adc67debf9fa1d732f5bb7a11678b70b4125bb5cb02b" exitCode=0 Jan 29 12:01:07 crc kubenswrapper[4852]: I0129 12:01:07.333613 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" event={"ID":"84413885-86dc-4801-bb4f-e6a0e88875d0","Type":"ContainerDied","Data":"490a189ae9f93b5acb18adc67debf9fa1d732f5bb7a11678b70b4125bb5cb02b"} Jan 29 12:01:07 crc kubenswrapper[4852]: I0129 12:01:07.333638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" event={"ID":"84413885-86dc-4801-bb4f-e6a0e88875d0","Type":"ContainerStarted","Data":"a5c7cbb65c952d993beda88c85824cc5e3f9d8aa7e6a9593a013ab9e0bbb305d"} Jan 29 12:01:08 crc kubenswrapper[4852]: I0129 12:01:08.344775 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" event={"ID":"84413885-86dc-4801-bb4f-e6a0e88875d0","Type":"ContainerStarted","Data":"051f0ebce9efe5fa0095e62c13ba255e9c9c6e1ef4c3885aedbe6eb22bbf8b57"} Jan 29 12:01:08 crc kubenswrapper[4852]: I0129 12:01:08.345626 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:08 crc kubenswrapper[4852]: I0129 12:01:08.512176 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="rabbitmq" containerID="cri-o://38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9" gracePeriod=604798 Jan 29 12:01:09 crc kubenswrapper[4852]: I0129 12:01:09.040835 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerName="rabbitmq" containerID="cri-o://80e52ea2c8f40a3ae391d5cf89a86ba25158681687a65b7f124629e074834fb9" gracePeriod=604799 Jan 29 12:01:11 crc kubenswrapper[4852]: I0129 12:01:11.232663 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.243:5672: connect: connection refused" Jan 29 12:01:11 crc kubenswrapper[4852]: I0129 12:01:11.521232 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" 
containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.244:5672: connect: connection refused" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.211792 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.248610 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" podStartSLOduration=10.248563606 podStartE2EDuration="10.248563606s" podCreationTimestamp="2026-01-29 12:01:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:01:08.369859474 +0000 UTC m=+4765.587190608" watchObservedRunningTime="2026-01-29 12:01:15.248563606 +0000 UTC m=+4772.465894760" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.349867 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211bc1f-7197-461e-b13d-9c99703f60be-pod-info\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.349929 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-confd\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350014 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqg79\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-kube-api-access-gqg79\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350042 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-plugins\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350076 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-server-conf\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350190 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350229 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211bc1f-7197-461e-b13d-9c99703f60be-erlang-cookie-secret\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350269 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-plugins-conf\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-erlang-cookie\") pod \"c211bc1f-7197-461e-b13d-9c99703f60be\" (UID: \"c211bc1f-7197-461e-b13d-9c99703f60be\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.350903 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.351302 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.351416 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.356479 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c211bc1f-7197-461e-b13d-9c99703f60be-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.356594 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c211bc1f-7197-461e-b13d-9c99703f60be-pod-info" (OuterVolumeSpecName: "pod-info") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.358304 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-kube-api-access-gqg79" (OuterVolumeSpecName: "kube-api-access-gqg79") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "kube-api-access-gqg79". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.365170 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01" (OuterVolumeSpecName: "persistence") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "pvc-cdc5a60d-4985-4c11-bd50-52c816031e01". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.379693 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-server-conf" (OuterVolumeSpecName: "server-conf") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.425004 4852 generic.go:334] "Generic (PLEG): container finished" podID="c211bc1f-7197-461e-b13d-9c99703f60be" containerID="38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9" exitCode=0 Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.425123 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c211bc1f-7197-461e-b13d-9c99703f60be","Type":"ContainerDied","Data":"38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9"} Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.425162 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c211bc1f-7197-461e-b13d-9c99703f60be","Type":"ContainerDied","Data":"c406c1960c653318a7949550bc1387986cba8746dd2d2a8001103e4cc81f5947"} Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.425184 4852 scope.go:117] "RemoveContainer" containerID="38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.426161 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.430831 4852 generic.go:334] "Generic (PLEG): container finished" podID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerID="80e52ea2c8f40a3ae391d5cf89a86ba25158681687a65b7f124629e074834fb9" exitCode=0 Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.430868 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"98003af5-418f-4e3c-b4e5-0a90e48f10d9","Type":"ContainerDied","Data":"80e52ea2c8f40a3ae391d5cf89a86ba25158681687a65b7f124629e074834fb9"} Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.438875 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c211bc1f-7197-461e-b13d-9c99703f60be" (UID: "c211bc1f-7197-461e-b13d-9c99703f60be"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452024 4852 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211bc1f-7197-461e-b13d-9c99703f60be-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452059 4852 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452073 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452087 4852 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211bc1f-7197-461e-b13d-9c99703f60be-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452099 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452116 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqg79\" (UniqueName: \"kubernetes.io/projected/c211bc1f-7197-461e-b13d-9c99703f60be-kube-api-access-gqg79\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452127 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211bc1f-7197-461e-b13d-9c99703f60be-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452139 4852 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211bc1f-7197-461e-b13d-9c99703f60be-server-conf\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.452185 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") on node \"crc\" " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.460285 4852 scope.go:117] "RemoveContainer" containerID="7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.472059 4852 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.472226 4852 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-cdc5a60d-4985-4c11-bd50-52c816031e01" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01") on node "crc" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.478891 4852 scope.go:117] "RemoveContainer" containerID="38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9" Jan 29 12:01:15 crc kubenswrapper[4852]: E0129 12:01:15.479780 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9\": container with ID starting with 38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9 not found: ID does not exist" containerID="38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.479814 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9"} err="failed to get container status \"38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9\": rpc error: code = NotFound desc = could not find container \"38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9\": container with ID starting with 38cc876106f6989637fb62c77365b00dfcfb2bb0a4811edf694ed9d6d47290e9 not found: ID does not exist" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.479837 4852 scope.go:117] "RemoveContainer" containerID="7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a" Jan 29 12:01:15 crc kubenswrapper[4852]: E0129 12:01:15.480168 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a\": container with ID starting with 7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a not found: ID does not exist" containerID="7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.480188 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a"} err="failed to get container status \"7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a\": rpc error: code = NotFound desc = could not find container \"7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a\": container with ID starting with 7b1cc209ff6193ce0f6051b243e69367315d40e13f4f0848d9aeb33f9836d23a not found: ID does not exist" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.554849 4852 reconciler_common.go:293] "Volume detached for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.679310 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.753175 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762129 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762293 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-plugins-conf\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762436 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98003af5-418f-4e3c-b4e5-0a90e48f10d9-pod-info\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762531 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hdgp\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-kube-api-access-5hdgp\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762610 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98003af5-418f-4e3c-b4e5-0a90e48f10d9-erlang-cookie-secret\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762728 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762792 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-plugins\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762830 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-confd\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762872 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-server-conf\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.762895 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-erlang-cookie\") pod \"98003af5-418f-4e3c-b4e5-0a90e48f10d9\" (UID: 
\"98003af5-418f-4e3c-b4e5-0a90e48f10d9\") " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.763831 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.766549 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.768129 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.779686 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-kube-api-access-5hdgp" (OuterVolumeSpecName: "kube-api-access-5hdgp") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "kube-api-access-5hdgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.782269 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4" (OuterVolumeSpecName: "persistence") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "pvc-9f945d56-1246-438e-881f-5b72c1bc95d4". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.784155 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/98003af5-418f-4e3c-b4e5-0a90e48f10d9-pod-info" (OuterVolumeSpecName: "pod-info") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.786406 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98003af5-418f-4e3c-b4e5-0a90e48f10d9-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.794983 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:01:15 crc kubenswrapper[4852]: E0129 12:01:15.796005 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="rabbitmq" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.796172 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="rabbitmq" Jan 29 12:01:15 crc kubenswrapper[4852]: E0129 12:01:15.796325 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerName="rabbitmq" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.796460 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerName="rabbitmq" Jan 29 12:01:15 crc kubenswrapper[4852]: E0129 12:01:15.796653 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="setup-container" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.796755 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="setup-container" Jan 29 12:01:15 crc kubenswrapper[4852]: E0129 12:01:15.796949 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerName="setup-container" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.797074 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerName="setup-container" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.797458 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" containerName="rabbitmq" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.797572 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" containerName="rabbitmq" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.798854 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.800962 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.802861 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-n5knh" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.803002 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.803110 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.804206 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-server-conf" (OuterVolumeSpecName: "server-conf") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.807446 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.809728 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864630 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hdgp\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-kube-api-access-5hdgp\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864661 4852 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98003af5-418f-4e3c-b4e5-0a90e48f10d9-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864690 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") on node \"crc\" " Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864703 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864715 4852 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-server-conf\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864723 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864732 4852 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98003af5-418f-4e3c-b4e5-0a90e48f10d9-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.864740 4852 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98003af5-418f-4e3c-b4e5-0a90e48f10d9-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.883221 4852 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.883360 4852 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-9f945d56-1246-438e-881f-5b72c1bc95d4" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4") on node "crc" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.886785 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "98003af5-418f-4e3c-b4e5-0a90e48f10d9" (UID: "98003af5-418f-4e3c-b4e5-0a90e48f10d9"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966523 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v2fp\" (UniqueName: \"kubernetes.io/projected/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-kube-api-access-8v2fp\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966606 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-server-conf\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966629 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966668 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966688 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966707 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966728 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966758 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966788 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-pod-info\") pod \"rabbitmq-server-0\" (UID: 
\"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966856 4852 reconciler_common.go:293] "Volume detached for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:15 crc kubenswrapper[4852]: I0129 12:01:15.966873 4852 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98003af5-418f-4e3c-b4e5-0a90e48f10d9-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.067774 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-pod-info\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.067875 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v2fp\" (UniqueName: \"kubernetes.io/projected/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-kube-api-access-8v2fp\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.067909 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-server-conf\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.067932 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.067985 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.068007 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.068034 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.068057 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.068087 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.069124 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.069192 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.069242 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-server-conf\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.069385 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.070117 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.070139 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30da8a8bfa9339d0264e2d05f3a1665b31dc61f924efe0cc5afdc39dde19010c/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.071633 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-pod-info\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.071670 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.072028 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.087720 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v2fp\" (UniqueName: \"kubernetes.io/projected/571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2-kube-api-access-8v2fp\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.096772 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cdc5a60d-4985-4c11-bd50-52c816031e01\") pod \"rabbitmq-server-0\" (UID: \"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2\") " pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.123544 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.129070 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.174815 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-6bhsf"] Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.176602 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerName="dnsmasq-dns" containerID="cri-o://c0c6fda2635a61814bf133c325573ecfe45d1210622d8b0c9bb257ca4f594621" gracePeriod=10 Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.428572 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.442880 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"98003af5-418f-4e3c-b4e5-0a90e48f10d9","Type":"ContainerDied","Data":"123711c4afed5b7d4be4e8a9ec131bdd2ac99a6bb7c7bb7592fd9ff1f2722cac"} Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.442936 4852 scope.go:117] "RemoveContainer" containerID="80e52ea2c8f40a3ae391d5cf89a86ba25158681687a65b7f124629e074834fb9" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.443078 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.447282 4852 generic.go:334] "Generic (PLEG): container finished" podID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerID="c0c6fda2635a61814bf133c325573ecfe45d1210622d8b0c9bb257ca4f594621" exitCode=0 Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.447359 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" event={"ID":"469ec5ee-8c2e-4207-8b6a-66770804daad","Type":"ContainerDied","Data":"c0c6fda2635a61814bf133c325573ecfe45d1210622d8b0c9bb257ca4f594621"} Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.450648 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2","Type":"ContainerStarted","Data":"568fd46d8a3bcc18142d52497b51c71b4c96920da4777598c380e8bfe467fa54"} Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.466705 4852 scope.go:117] "RemoveContainer" containerID="bb537baeb31f50b94a7f823dc13d2712afe1fe98177e62ceb5904f51cc8de88a" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.477860 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.487399 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.510780 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.512191 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.514149 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-plkcp" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.514333 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.514440 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.515270 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.515401 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.530934 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681428 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681474 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6296c02b-df73-4079-99ee-a7e761047e7f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681496 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6296c02b-df73-4079-99ee-a7e761047e7f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681556 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6296c02b-df73-4079-99ee-a7e761047e7f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681596 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsrvm\" (UniqueName: \"kubernetes.io/projected/6296c02b-df73-4079-99ee-a7e761047e7f-kube-api-access-qsrvm\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681625 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6296c02b-df73-4079-99ee-a7e761047e7f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc 
kubenswrapper[4852]: I0129 12:01:16.681667 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681691 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.681707 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.783627 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.783685 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.783736 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.783821 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6296c02b-df73-4079-99ee-a7e761047e7f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.783844 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6296c02b-df73-4079-99ee-a7e761047e7f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.783895 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6296c02b-df73-4079-99ee-a7e761047e7f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: 
I0129 12:01:16.783917 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsrvm\" (UniqueName: \"kubernetes.io/projected/6296c02b-df73-4079-99ee-a7e761047e7f-kube-api-access-qsrvm\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.784892 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.785105 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6296c02b-df73-4079-99ee-a7e761047e7f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.785083 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6296c02b-df73-4079-99ee-a7e761047e7f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.785321 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6296c02b-df73-4079-99ee-a7e761047e7f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.785321 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.785828 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.788002 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6296c02b-df73-4079-99ee-a7e761047e7f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.788138 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.788189 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e231b3f56fa96d6ea9e653af8e921b02b4044cc2e09826ed8cc75d3cb14be7c5/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.788800 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6296c02b-df73-4079-99ee-a7e761047e7f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.788831 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6296c02b-df73-4079-99ee-a7e761047e7f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.802403 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsrvm\" (UniqueName: \"kubernetes.io/projected/6296c02b-df73-4079-99ee-a7e761047e7f-kube-api-access-qsrvm\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.822030 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9f945d56-1246-438e-881f-5b72c1bc95d4\") pod \"rabbitmq-cell1-server-0\" (UID: \"6296c02b-df73-4079-99ee-a7e761047e7f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:16 crc kubenswrapper[4852]: I0129 12:01:16.834261 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.139140 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.293344 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6r7j\" (UniqueName: \"kubernetes.io/projected/469ec5ee-8c2e-4207-8b6a-66770804daad-kube-api-access-b6r7j\") pod \"469ec5ee-8c2e-4207-8b6a-66770804daad\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.293507 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-config\") pod \"469ec5ee-8c2e-4207-8b6a-66770804daad\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.293569 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-dns-svc\") pod \"469ec5ee-8c2e-4207-8b6a-66770804daad\" (UID: \"469ec5ee-8c2e-4207-8b6a-66770804daad\") " Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.300547 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469ec5ee-8c2e-4207-8b6a-66770804daad-kube-api-access-b6r7j" (OuterVolumeSpecName: "kube-api-access-b6r7j") pod "469ec5ee-8c2e-4207-8b6a-66770804daad" (UID: "469ec5ee-8c2e-4207-8b6a-66770804daad"). InnerVolumeSpecName "kube-api-access-b6r7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.316806 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 12:01:17 crc kubenswrapper[4852]: W0129 12:01:17.322039 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6296c02b_df73_4079_99ee_a7e761047e7f.slice/crio-bb1b13154642dbccea4105fe9ec968b67508df83212d007be8c6def52a7383f3 WatchSource:0}: Error finding container bb1b13154642dbccea4105fe9ec968b67508df83212d007be8c6def52a7383f3: Status 404 returned error can't find the container with id bb1b13154642dbccea4105fe9ec968b67508df83212d007be8c6def52a7383f3 Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.344287 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-config" (OuterVolumeSpecName: "config") pod "469ec5ee-8c2e-4207-8b6a-66770804daad" (UID: "469ec5ee-8c2e-4207-8b6a-66770804daad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.344456 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "469ec5ee-8c2e-4207-8b6a-66770804daad" (UID: "469ec5ee-8c2e-4207-8b6a-66770804daad"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.395316 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6r7j\" (UniqueName: \"kubernetes.io/projected/469ec5ee-8c2e-4207-8b6a-66770804daad-kube-api-access-b6r7j\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.395350 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.395359 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/469ec5ee-8c2e-4207-8b6a-66770804daad-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.462450 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.473762 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98003af5-418f-4e3c-b4e5-0a90e48f10d9" path="/var/lib/kubelet/pods/98003af5-418f-4e3c-b4e5-0a90e48f10d9/volumes" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.475671 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c211bc1f-7197-461e-b13d-9c99703f60be" path="/var/lib/kubelet/pods/c211bc1f-7197-461e-b13d-9c99703f60be/volumes" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.476693 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-6bhsf" event={"ID":"469ec5ee-8c2e-4207-8b6a-66770804daad","Type":"ContainerDied","Data":"7089f7c54afbce7c064bae32e2460dba8f96872498d585bf362df7ab17ce39d1"} Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.476741 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6296c02b-df73-4079-99ee-a7e761047e7f","Type":"ContainerStarted","Data":"bb1b13154642dbccea4105fe9ec968b67508df83212d007be8c6def52a7383f3"} Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.477036 4852 scope.go:117] "RemoveContainer" containerID="c0c6fda2635a61814bf133c325573ecfe45d1210622d8b0c9bb257ca4f594621" Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.496870 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-6bhsf"] Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.502947 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-6bhsf"] Jan 29 12:01:17 crc kubenswrapper[4852]: I0129 12:01:17.504088 4852 scope.go:117] "RemoveContainer" containerID="1889b1b701097d28bebeb32a1b8f5ebfb7c63379d9c1f6f9667a9ec310ded326" Jan 29 12:01:18 crc kubenswrapper[4852]: I0129 12:01:18.480401 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6296c02b-df73-4079-99ee-a7e761047e7f","Type":"ContainerStarted","Data":"e7a59dfcab01a2b5d278f9ca28e67639c22963b5e87abcc424569048916d1552"} Jan 29 12:01:18 crc kubenswrapper[4852]: I0129 12:01:18.482756 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2","Type":"ContainerStarted","Data":"2f66be4f03ae22a480107dff09ef7c16239eaf9c3bae5a6f5831c294a49b5e36"} Jan 29 12:01:19 crc kubenswrapper[4852]: I0129 12:01:19.481855 4852 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" path="/var/lib/kubelet/pods/469ec5ee-8c2e-4207-8b6a-66770804daad/volumes" Jan 29 12:01:30 crc kubenswrapper[4852]: I0129 12:01:30.017144 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:01:30 crc kubenswrapper[4852]: I0129 12:01:30.017431 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:01:50 crc kubenswrapper[4852]: I0129 12:01:50.775033 4852 generic.go:334] "Generic (PLEG): container finished" podID="571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2" containerID="2f66be4f03ae22a480107dff09ef7c16239eaf9c3bae5a6f5831c294a49b5e36" exitCode=0 Jan 29 12:01:50 crc kubenswrapper[4852]: I0129 12:01:50.775143 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2","Type":"ContainerDied","Data":"2f66be4f03ae22a480107dff09ef7c16239eaf9c3bae5a6f5831c294a49b5e36"} Jan 29 12:01:51 crc kubenswrapper[4852]: I0129 12:01:51.789499 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2","Type":"ContainerStarted","Data":"076a4202669cd5b75f1e9de8e0f2efe1616d49c8dc35becae9f6fa1c398e8978"} Jan 29 12:01:51 crc kubenswrapper[4852]: I0129 12:01:51.790151 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 29 12:01:51 crc kubenswrapper[4852]: I0129 12:01:51.822230 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.822198876 podStartE2EDuration="36.822198876s" podCreationTimestamp="2026-01-29 12:01:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:01:51.816547258 +0000 UTC m=+4809.033878412" watchObservedRunningTime="2026-01-29 12:01:51.822198876 +0000 UTC m=+4809.039530010" Jan 29 12:01:52 crc kubenswrapper[4852]: I0129 12:01:52.797571 4852 generic.go:334] "Generic (PLEG): container finished" podID="6296c02b-df73-4079-99ee-a7e761047e7f" containerID="e7a59dfcab01a2b5d278f9ca28e67639c22963b5e87abcc424569048916d1552" exitCode=0 Jan 29 12:01:52 crc kubenswrapper[4852]: I0129 12:01:52.797655 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6296c02b-df73-4079-99ee-a7e761047e7f","Type":"ContainerDied","Data":"e7a59dfcab01a2b5d278f9ca28e67639c22963b5e87abcc424569048916d1552"} Jan 29 12:01:53 crc kubenswrapper[4852]: I0129 12:01:53.806707 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6296c02b-df73-4079-99ee-a7e761047e7f","Type":"ContainerStarted","Data":"23b0af7c184c4347e980a533ad6a031cc1efb5a81c908408a1a3258c210375ac"} Jan 29 12:01:53 crc kubenswrapper[4852]: I0129 12:01:53.807202 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:01:53 crc 
kubenswrapper[4852]: I0129 12:01:53.837982 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.837920561 podStartE2EDuration="37.837920561s" podCreationTimestamp="2026-01-29 12:01:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:01:53.831888613 +0000 UTC m=+4811.049219817" watchObservedRunningTime="2026-01-29 12:01:53.837920561 +0000 UTC m=+4811.055251725" Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.016943 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.017576 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.017696 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.018508 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9e329bd6a97de0fde26c46a7e6288174841eeafeb381c1cb751e172021dc8492"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.018605 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://9e329bd6a97de0fde26c46a7e6288174841eeafeb381c1cb751e172021dc8492" gracePeriod=600 Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.863989 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="9e329bd6a97de0fde26c46a7e6288174841eeafeb381c1cb751e172021dc8492" exitCode=0 Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.864289 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"9e329bd6a97de0fde26c46a7e6288174841eeafeb381c1cb751e172021dc8492"} Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.864355 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87"} Jan 29 12:02:00 crc kubenswrapper[4852]: I0129 12:02:00.864378 4852 scope.go:117] "RemoveContainer" containerID="f9bf3370be7efbb49deb74c0fe92d34b624ee46467d00118a0d957c442cd9d2b" Jan 29 12:02:06 crc kubenswrapper[4852]: I0129 12:02:06.131923 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 29 12:02:06 crc kubenswrapper[4852]: I0129 12:02:06.838912 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.526644 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 29 12:02:18 crc kubenswrapper[4852]: E0129 12:02:18.527431 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerName="init" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.527446 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerName="init" Jan 29 12:02:18 crc kubenswrapper[4852]: E0129 12:02:18.527479 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerName="dnsmasq-dns" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.527485 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerName="dnsmasq-dns" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.527665 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="469ec5ee-8c2e-4207-8b6a-66770804daad" containerName="dnsmasq-dns" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.528149 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.530903 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-dfz8x" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.534103 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.570951 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhmwq\" (UniqueName: \"kubernetes.io/projected/0d000388-0a31-47c9-b6a9-61a8dcecd0a7-kube-api-access-mhmwq\") pod \"mariadb-client\" (UID: \"0d000388-0a31-47c9-b6a9-61a8dcecd0a7\") " pod="openstack/mariadb-client" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.672541 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhmwq\" (UniqueName: \"kubernetes.io/projected/0d000388-0a31-47c9-b6a9-61a8dcecd0a7-kube-api-access-mhmwq\") pod \"mariadb-client\" (UID: \"0d000388-0a31-47c9-b6a9-61a8dcecd0a7\") " pod="openstack/mariadb-client" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.691969 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhmwq\" (UniqueName: \"kubernetes.io/projected/0d000388-0a31-47c9-b6a9-61a8dcecd0a7-kube-api-access-mhmwq\") pod \"mariadb-client\" (UID: \"0d000388-0a31-47c9-b6a9-61a8dcecd0a7\") " pod="openstack/mariadb-client" Jan 29 12:02:18 crc kubenswrapper[4852]: I0129 12:02:18.886153 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:02:19 crc kubenswrapper[4852]: I0129 12:02:19.363531 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:02:19 crc kubenswrapper[4852]: W0129 12:02:19.365211 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d000388_0a31_47c9_b6a9_61a8dcecd0a7.slice/crio-2f7a1f0e5a6f0e38192d1a7cd8043a3c88e8a16ffe7c46bd71ee217c531f8658 WatchSource:0}: Error finding container 2f7a1f0e5a6f0e38192d1a7cd8043a3c88e8a16ffe7c46bd71ee217c531f8658: Status 404 returned error can't find the container with id 2f7a1f0e5a6f0e38192d1a7cd8043a3c88e8a16ffe7c46bd71ee217c531f8658 Jan 29 12:02:20 crc kubenswrapper[4852]: I0129 12:02:20.196470 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"0d000388-0a31-47c9-b6a9-61a8dcecd0a7","Type":"ContainerStarted","Data":"fc21ca45536e88be4929a7656e8caf1ed35f32a697bc38d658b76ff9a64b9337"} Jan 29 12:02:20 crc kubenswrapper[4852]: I0129 12:02:20.197704 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"0d000388-0a31-47c9-b6a9-61a8dcecd0a7","Type":"ContainerStarted","Data":"2f7a1f0e5a6f0e38192d1a7cd8043a3c88e8a16ffe7c46bd71ee217c531f8658"} Jan 29 12:02:20 crc kubenswrapper[4852]: I0129 12:02:20.220505 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client" podStartSLOduration=2.220487487 podStartE2EDuration="2.220487487s" podCreationTimestamp="2026-01-29 12:02:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:02:20.214389348 +0000 UTC m=+4837.431720522" watchObservedRunningTime="2026-01-29 12:02:20.220487487 +0000 UTC m=+4837.437818621" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.786413 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-htbnw"] Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.788698 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-htbnw"] Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.788794 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.854679 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fbd8\" (UniqueName: \"kubernetes.io/projected/9a55943f-b44c-41c4-82e9-e266d1b172d4-kube-api-access-8fbd8\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.854782 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-catalog-content\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.854860 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-utilities\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.956738 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-catalog-content\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.956871 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-utilities\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.957359 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-catalog-content\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.957398 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fbd8\" (UniqueName: \"kubernetes.io/projected/9a55943f-b44c-41c4-82e9-e266d1b172d4-kube-api-access-8fbd8\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.957381 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-utilities\") pod \"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:23 crc kubenswrapper[4852]: I0129 12:02:23.981387 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fbd8\" (UniqueName: \"kubernetes.io/projected/9a55943f-b44c-41c4-82e9-e266d1b172d4-kube-api-access-8fbd8\") pod 
\"community-operators-htbnw\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:24 crc kubenswrapper[4852]: I0129 12:02:24.148805 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:24 crc kubenswrapper[4852]: I0129 12:02:24.704217 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-htbnw"] Jan 29 12:02:25 crc kubenswrapper[4852]: I0129 12:02:25.249195 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerStarted","Data":"0f3c306f39ae363aac185fced23a5268f0826a32f3fbaa8a46d626edb83c5326"} Jan 29 12:02:25 crc kubenswrapper[4852]: I0129 12:02:25.249241 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerStarted","Data":"33ac61500951ec885bc6fd61fafba3a82b8d2350a2b0b980571816ea793744f8"} Jan 29 12:02:26 crc kubenswrapper[4852]: I0129 12:02:26.262640 4852 generic.go:334] "Generic (PLEG): container finished" podID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerID="0f3c306f39ae363aac185fced23a5268f0826a32f3fbaa8a46d626edb83c5326" exitCode=0 Jan 29 12:02:26 crc kubenswrapper[4852]: I0129 12:02:26.262736 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerDied","Data":"0f3c306f39ae363aac185fced23a5268f0826a32f3fbaa8a46d626edb83c5326"} Jan 29 12:02:28 crc kubenswrapper[4852]: I0129 12:02:28.281286 4852 generic.go:334] "Generic (PLEG): container finished" podID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerID="eb5ae318f849211264cbe80338a41dc45eff3892e4d907b670dfa9da561bde82" exitCode=0 Jan 29 12:02:28 crc kubenswrapper[4852]: I0129 12:02:28.281392 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerDied","Data":"eb5ae318f849211264cbe80338a41dc45eff3892e4d907b670dfa9da561bde82"} Jan 29 12:02:29 crc kubenswrapper[4852]: I0129 12:02:29.291786 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerStarted","Data":"e80db3c743f767699bac3536fb863e9ad2cae3991f6c2b5885cdba3857fc4ead"} Jan 29 12:02:29 crc kubenswrapper[4852]: I0129 12:02:29.315930 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-htbnw" podStartSLOduration=3.749637265 podStartE2EDuration="6.315909874s" podCreationTimestamp="2026-01-29 12:02:23 +0000 UTC" firstStartedPulling="2026-01-29 12:02:26.264793701 +0000 UTC m=+4843.482124835" lastFinishedPulling="2026-01-29 12:02:28.83106631 +0000 UTC m=+4846.048397444" observedRunningTime="2026-01-29 12:02:29.312289375 +0000 UTC m=+4846.529620509" watchObservedRunningTime="2026-01-29 12:02:29.315909874 +0000 UTC m=+4846.533241008" Jan 29 12:02:30 crc kubenswrapper[4852]: E0129 12:02:30.416370 4852 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.23:34376->38.102.83.23:36165: write tcp 38.102.83.23:34376->38.102.83.23:36165: write: broken pipe Jan 29 12:02:33 crc 
kubenswrapper[4852]: I0129 12:02:33.911277 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:02:33 crc kubenswrapper[4852]: I0129 12:02:33.911955 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-client" podUID="0d000388-0a31-47c9-b6a9-61a8dcecd0a7" containerName="mariadb-client" containerID="cri-o://fc21ca45536e88be4929a7656e8caf1ed35f32a697bc38d658b76ff9a64b9337" gracePeriod=30 Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.149059 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.149778 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.202324 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.333456 4852 generic.go:334] "Generic (PLEG): container finished" podID="0d000388-0a31-47c9-b6a9-61a8dcecd0a7" containerID="fc21ca45536e88be4929a7656e8caf1ed35f32a697bc38d658b76ff9a64b9337" exitCode=143 Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.334489 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"0d000388-0a31-47c9-b6a9-61a8dcecd0a7","Type":"ContainerDied","Data":"fc21ca45536e88be4929a7656e8caf1ed35f32a697bc38d658b76ff9a64b9337"} Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.379103 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.440074 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-htbnw"] Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.474552 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.552185 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhmwq\" (UniqueName: \"kubernetes.io/projected/0d000388-0a31-47c9-b6a9-61a8dcecd0a7-kube-api-access-mhmwq\") pod \"0d000388-0a31-47c9-b6a9-61a8dcecd0a7\" (UID: \"0d000388-0a31-47c9-b6a9-61a8dcecd0a7\") " Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.558424 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d000388-0a31-47c9-b6a9-61a8dcecd0a7-kube-api-access-mhmwq" (OuterVolumeSpecName: "kube-api-access-mhmwq") pod "0d000388-0a31-47c9-b6a9-61a8dcecd0a7" (UID: "0d000388-0a31-47c9-b6a9-61a8dcecd0a7"). InnerVolumeSpecName "kube-api-access-mhmwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:02:34 crc kubenswrapper[4852]: I0129 12:02:34.654376 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhmwq\" (UniqueName: \"kubernetes.io/projected/0d000388-0a31-47c9-b6a9-61a8dcecd0a7-kube-api-access-mhmwq\") on node \"crc\" DevicePath \"\"" Jan 29 12:02:35 crc kubenswrapper[4852]: I0129 12:02:35.344327 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:02:35 crc kubenswrapper[4852]: I0129 12:02:35.344337 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"0d000388-0a31-47c9-b6a9-61a8dcecd0a7","Type":"ContainerDied","Data":"2f7a1f0e5a6f0e38192d1a7cd8043a3c88e8a16ffe7c46bd71ee217c531f8658"} Jan 29 12:02:35 crc kubenswrapper[4852]: I0129 12:02:35.344449 4852 scope.go:117] "RemoveContainer" containerID="fc21ca45536e88be4929a7656e8caf1ed35f32a697bc38d658b76ff9a64b9337" Jan 29 12:02:35 crc kubenswrapper[4852]: I0129 12:02:35.384018 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:02:35 crc kubenswrapper[4852]: I0129 12:02:35.392464 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:02:35 crc kubenswrapper[4852]: I0129 12:02:35.475127 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d000388-0a31-47c9-b6a9-61a8dcecd0a7" path="/var/lib/kubelet/pods/0d000388-0a31-47c9-b6a9-61a8dcecd0a7/volumes" Jan 29 12:02:36 crc kubenswrapper[4852]: I0129 12:02:36.352839 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-htbnw" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="registry-server" containerID="cri-o://e80db3c743f767699bac3536fb863e9ad2cae3991f6c2b5885cdba3857fc4ead" gracePeriod=2 Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.360916 4852 generic.go:334] "Generic (PLEG): container finished" podID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerID="e80db3c743f767699bac3536fb863e9ad2cae3991f6c2b5885cdba3857fc4ead" exitCode=0 Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.360962 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerDied","Data":"e80db3c743f767699bac3536fb863e9ad2cae3991f6c2b5885cdba3857fc4ead"} Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.730617 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.816282 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-catalog-content\") pod \"9a55943f-b44c-41c4-82e9-e266d1b172d4\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.816424 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-utilities\") pod \"9a55943f-b44c-41c4-82e9-e266d1b172d4\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.816469 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fbd8\" (UniqueName: \"kubernetes.io/projected/9a55943f-b44c-41c4-82e9-e266d1b172d4-kube-api-access-8fbd8\") pod \"9a55943f-b44c-41c4-82e9-e266d1b172d4\" (UID: \"9a55943f-b44c-41c4-82e9-e266d1b172d4\") " Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.817753 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-utilities" (OuterVolumeSpecName: "utilities") pod "9a55943f-b44c-41c4-82e9-e266d1b172d4" (UID: "9a55943f-b44c-41c4-82e9-e266d1b172d4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.822076 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a55943f-b44c-41c4-82e9-e266d1b172d4-kube-api-access-8fbd8" (OuterVolumeSpecName: "kube-api-access-8fbd8") pod "9a55943f-b44c-41c4-82e9-e266d1b172d4" (UID: "9a55943f-b44c-41c4-82e9-e266d1b172d4"). InnerVolumeSpecName "kube-api-access-8fbd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.873180 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a55943f-b44c-41c4-82e9-e266d1b172d4" (UID: "9a55943f-b44c-41c4-82e9-e266d1b172d4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.918665 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fbd8\" (UniqueName: \"kubernetes.io/projected/9a55943f-b44c-41c4-82e9-e266d1b172d4-kube-api-access-8fbd8\") on node \"crc\" DevicePath \"\"" Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.918707 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:02:37 crc kubenswrapper[4852]: I0129 12:02:37.918716 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a55943f-b44c-41c4-82e9-e266d1b172d4-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.370116 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htbnw" event={"ID":"9a55943f-b44c-41c4-82e9-e266d1b172d4","Type":"ContainerDied","Data":"33ac61500951ec885bc6fd61fafba3a82b8d2350a2b0b980571816ea793744f8"} Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.370172 4852 scope.go:117] "RemoveContainer" containerID="e80db3c743f767699bac3536fb863e9ad2cae3991f6c2b5885cdba3857fc4ead" Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.370194 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-htbnw" Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.387506 4852 scope.go:117] "RemoveContainer" containerID="eb5ae318f849211264cbe80338a41dc45eff3892e4d907b670dfa9da561bde82" Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.401799 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-htbnw"] Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.413786 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-htbnw"] Jan 29 12:02:38 crc kubenswrapper[4852]: I0129 12:02:38.431676 4852 scope.go:117] "RemoveContainer" containerID="0f3c306f39ae363aac185fced23a5268f0826a32f3fbaa8a46d626edb83c5326" Jan 29 12:02:39 crc kubenswrapper[4852]: I0129 12:02:39.474758 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" path="/var/lib/kubelet/pods/9a55943f-b44c-41c4-82e9-e266d1b172d4/volumes" Jan 29 12:02:49 crc kubenswrapper[4852]: I0129 12:02:49.122548 4852 scope.go:117] "RemoveContainer" containerID="d4dc398c5aad1e544aa8bf58e8c2c0880359bc98f5dd529793c6aa82c7582234" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.426344 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bxjgf"] Jan 29 12:03:54 crc kubenswrapper[4852]: E0129 12:03:54.427373 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d000388-0a31-47c9-b6a9-61a8dcecd0a7" containerName="mariadb-client" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.427393 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d000388-0a31-47c9-b6a9-61a8dcecd0a7" containerName="mariadb-client" Jan 29 12:03:54 crc kubenswrapper[4852]: E0129 12:03:54.427410 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="registry-server" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.427419 4852 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="registry-server" Jan 29 12:03:54 crc kubenswrapper[4852]: E0129 12:03:54.427437 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="extract-content" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.427445 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="extract-content" Jan 29 12:03:54 crc kubenswrapper[4852]: E0129 12:03:54.427466 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="extract-utilities" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.427474 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="extract-utilities" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.427676 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a55943f-b44c-41c4-82e9-e266d1b172d4" containerName="registry-server" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.427697 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d000388-0a31-47c9-b6a9-61a8dcecd0a7" containerName="mariadb-client" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.428912 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.446933 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bxjgf"] Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.594406 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-catalog-content\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.594534 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtgmz\" (UniqueName: \"kubernetes.io/projected/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-kube-api-access-vtgmz\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.594603 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-utilities\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.696234 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-utilities\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.696325 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-catalog-content\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.696417 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtgmz\" (UniqueName: \"kubernetes.io/projected/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-kube-api-access-vtgmz\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.696729 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-utilities\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.696822 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-catalog-content\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.715088 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtgmz\" (UniqueName: \"kubernetes.io/projected/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-kube-api-access-vtgmz\") pod \"certified-operators-bxjgf\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:54 crc kubenswrapper[4852]: I0129 12:03:54.758144 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:03:55 crc kubenswrapper[4852]: I0129 12:03:55.060095 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bxjgf"] Jan 29 12:03:56 crc kubenswrapper[4852]: I0129 12:03:56.018341 4852 generic.go:334] "Generic (PLEG): container finished" podID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerID="898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b" exitCode=0 Jan 29 12:03:56 crc kubenswrapper[4852]: I0129 12:03:56.018386 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxjgf" event={"ID":"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23","Type":"ContainerDied","Data":"898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b"} Jan 29 12:03:56 crc kubenswrapper[4852]: I0129 12:03:56.018414 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxjgf" event={"ID":"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23","Type":"ContainerStarted","Data":"c97d0e10187171177d56f6e12c72ea6773151fe7c623c5508807eabe60c4273e"} Jan 29 12:03:58 crc kubenswrapper[4852]: I0129 12:03:58.041474 4852 generic.go:334] "Generic (PLEG): container finished" podID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerID="07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec" exitCode=0 Jan 29 12:03:58 crc kubenswrapper[4852]: I0129 12:03:58.041573 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxjgf" event={"ID":"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23","Type":"ContainerDied","Data":"07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec"} Jan 29 12:03:59 crc kubenswrapper[4852]: I0129 12:03:59.053326 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxjgf" event={"ID":"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23","Type":"ContainerStarted","Data":"72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a"} Jan 29 12:04:00 crc kubenswrapper[4852]: I0129 12:04:00.016695 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:04:00 crc kubenswrapper[4852]: I0129 12:04:00.016762 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:04:04 crc kubenswrapper[4852]: I0129 12:04:04.758678 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:04:04 crc kubenswrapper[4852]: I0129 12:04:04.759366 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:04:04 crc kubenswrapper[4852]: I0129 12:04:04.804901 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:04:04 crc kubenswrapper[4852]: I0129 12:04:04.832429 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-bxjgf" podStartSLOduration=8.168405458 podStartE2EDuration="10.832358511s" podCreationTimestamp="2026-01-29 12:03:54 +0000 UTC" firstStartedPulling="2026-01-29 12:03:56.020520878 +0000 UTC m=+4933.237852052" lastFinishedPulling="2026-01-29 12:03:58.684473971 +0000 UTC m=+4935.901805105" observedRunningTime="2026-01-29 12:03:59.074935473 +0000 UTC m=+4936.292266607" watchObservedRunningTime="2026-01-29 12:04:04.832358511 +0000 UTC m=+4942.049689645" Jan 29 12:04:05 crc kubenswrapper[4852]: I0129 12:04:05.140864 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:04:05 crc kubenswrapper[4852]: I0129 12:04:05.194428 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bxjgf"] Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.116154 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bxjgf" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="registry-server" containerID="cri-o://72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a" gracePeriod=2 Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.497761 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.595353 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtgmz\" (UniqueName: \"kubernetes.io/projected/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-kube-api-access-vtgmz\") pod \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.595494 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-catalog-content\") pod \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.595543 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-utilities\") pod \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\" (UID: \"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23\") " Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.598349 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-utilities" (OuterVolumeSpecName: "utilities") pod "4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" (UID: "4fd76267-7fc4-40d4-a3ff-d7efe7e18e23"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.602083 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-kube-api-access-vtgmz" (OuterVolumeSpecName: "kube-api-access-vtgmz") pod "4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" (UID: "4fd76267-7fc4-40d4-a3ff-d7efe7e18e23"). InnerVolumeSpecName "kube-api-access-vtgmz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.654155 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" (UID: "4fd76267-7fc4-40d4-a3ff-d7efe7e18e23"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.696851 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.696894 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:04:07 crc kubenswrapper[4852]: I0129 12:04:07.696906 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtgmz\" (UniqueName: \"kubernetes.io/projected/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23-kube-api-access-vtgmz\") on node \"crc\" DevicePath \"\"" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.126263 4852 generic.go:334] "Generic (PLEG): container finished" podID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerID="72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a" exitCode=0 Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.126342 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxjgf" event={"ID":"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23","Type":"ContainerDied","Data":"72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a"} Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.126411 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxjgf" event={"ID":"4fd76267-7fc4-40d4-a3ff-d7efe7e18e23","Type":"ContainerDied","Data":"c97d0e10187171177d56f6e12c72ea6773151fe7c623c5508807eabe60c4273e"} Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.126433 4852 scope.go:117] "RemoveContainer" containerID="72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.126445 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bxjgf" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.151462 4852 scope.go:117] "RemoveContainer" containerID="07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.167883 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bxjgf"] Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.173828 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bxjgf"] Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.204831 4852 scope.go:117] "RemoveContainer" containerID="898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.226309 4852 scope.go:117] "RemoveContainer" containerID="72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a" Jan 29 12:04:08 crc kubenswrapper[4852]: E0129 12:04:08.226741 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a\": container with ID starting with 72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a not found: ID does not exist" containerID="72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.226772 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a"} err="failed to get container status \"72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a\": rpc error: code = NotFound desc = could not find container \"72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a\": container with ID starting with 72a1385d777abd08503c9999287024ae4fb12fa04a430b811fff692989c0840a not found: ID does not exist" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.226802 4852 scope.go:117] "RemoveContainer" containerID="07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec" Jan 29 12:04:08 crc kubenswrapper[4852]: E0129 12:04:08.227162 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec\": container with ID starting with 07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec not found: ID does not exist" containerID="07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.227185 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec"} err="failed to get container status \"07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec\": rpc error: code = NotFound desc = could not find container \"07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec\": container with ID starting with 07e916eefb480ac38dd0e4ac404c7660b0ec97b29a10f5bfd1579333c761c9ec not found: ID does not exist" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.227199 4852 scope.go:117] "RemoveContainer" containerID="898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b" Jan 29 12:04:08 crc kubenswrapper[4852]: E0129 12:04:08.227622 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b\": container with ID starting with 898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b not found: ID does not exist" containerID="898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b" Jan 29 12:04:08 crc kubenswrapper[4852]: I0129 12:04:08.227647 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b"} err="failed to get container status \"898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b\": rpc error: code = NotFound desc = could not find container \"898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b\": container with ID starting with 898c0a702f554f037451a2dbf0c9a39511633d7276b6553da36d9a7da591f72b not found: ID does not exist" Jan 29 12:04:09 crc kubenswrapper[4852]: I0129 12:04:09.476485 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" path="/var/lib/kubelet/pods/4fd76267-7fc4-40d4-a3ff-d7efe7e18e23/volumes" Jan 29 12:04:30 crc kubenswrapper[4852]: I0129 12:04:30.017804 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:04:30 crc kubenswrapper[4852]: I0129 12:04:30.018801 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.017232 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.018815 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.018940 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.019682 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.019819 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" gracePeriod=600 Jan 29 12:05:00 crc kubenswrapper[4852]: E0129 12:05:00.153509 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.617218 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" exitCode=0 Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.617296 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87"} Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.617677 4852 scope.go:117] "RemoveContainer" containerID="9e329bd6a97de0fde26c46a7e6288174841eeafeb381c1cb751e172021dc8492" Jan 29 12:05:00 crc kubenswrapper[4852]: I0129 12:05:00.618257 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:05:00 crc kubenswrapper[4852]: E0129 12:05:00.618562 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:05:14 crc kubenswrapper[4852]: I0129 12:05:14.466071 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:05:14 crc kubenswrapper[4852]: E0129 12:05:14.467511 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:05:28 crc kubenswrapper[4852]: I0129 12:05:28.463246 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:05:28 crc kubenswrapper[4852]: E0129 12:05:28.464015 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:05:39 crc kubenswrapper[4852]: I0129 12:05:39.464150 4852 scope.go:117] 
"RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:05:39 crc kubenswrapper[4852]: E0129 12:05:39.465015 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:05:50 crc kubenswrapper[4852]: I0129 12:05:50.463833 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:05:50 crc kubenswrapper[4852]: E0129 12:05:50.464833 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:06:05 crc kubenswrapper[4852]: I0129 12:06:05.464229 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:06:05 crc kubenswrapper[4852]: E0129 12:06:05.464991 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:06:16 crc kubenswrapper[4852]: I0129 12:06:16.463511 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:06:16 crc kubenswrapper[4852]: E0129 12:06:16.464423 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:06:27 crc kubenswrapper[4852]: I0129 12:06:27.464530 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:06:27 crc kubenswrapper[4852]: E0129 12:06:27.465422 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.890447 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 12:06:32 crc kubenswrapper[4852]: E0129 12:06:32.892140 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="registry-server" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.892170 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="registry-server" Jan 29 12:06:32 crc kubenswrapper[4852]: E0129 12:06:32.892237 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="extract-utilities" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.892251 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="extract-utilities" Jan 29 12:06:32 crc kubenswrapper[4852]: E0129 12:06:32.892273 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="extract-content" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.892287 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="extract-content" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.892687 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fd76267-7fc4-40d4-a3ff-d7efe7e18e23" containerName="registry-server" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.893822 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.897348 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-dfz8x" Jan 29 12:06:32 crc kubenswrapper[4852]: I0129 12:06:32.904394 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.008998 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf5gh\" (UniqueName: \"kubernetes.io/projected/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2-kube-api-access-gf5gh\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.009502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.110803 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf5gh\" (UniqueName: \"kubernetes.io/projected/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2-kube-api-access-gf5gh\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.110880 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.115557 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. 
Skipping MountDevice... Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.115656 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2449d5e384919f7851e91cd1e38f3bc56ea83828f9d4d0c3d718a6edf59f13d7/globalmount\"" pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.133378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf5gh\" (UniqueName: \"kubernetes.io/projected/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2-kube-api-access-gf5gh\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.158899 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") pod \"mariadb-copy-data\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.230553 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.746928 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 12:06:33 crc kubenswrapper[4852]: W0129 12:06:33.754244 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d441f0f_dfdf_478b_b6fc_aac1663ad0d2.slice/crio-7852247428f467bbbfaaa9ff550dc910160e6c11d9315fda89886b4a0b69dbc7 WatchSource:0}: Error finding container 7852247428f467bbbfaaa9ff550dc910160e6c11d9315fda89886b4a0b69dbc7: Status 404 returned error can't find the container with id 7852247428f467bbbfaaa9ff550dc910160e6c11d9315fda89886b4a0b69dbc7 Jan 29 12:06:33 crc kubenswrapper[4852]: I0129 12:06:33.852215 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2","Type":"ContainerStarted","Data":"7852247428f467bbbfaaa9ff550dc910160e6c11d9315fda89886b4a0b69dbc7"} Jan 29 12:06:34 crc kubenswrapper[4852]: I0129 12:06:34.862501 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2","Type":"ContainerStarted","Data":"b273d55a9a2199aca554638e48d079d9579814affcd6e894bd8fa9ee054f94e0"} Jan 29 12:06:34 crc kubenswrapper[4852]: I0129 12:06:34.880369 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.8803494819999997 podStartE2EDuration="3.880349482s" podCreationTimestamp="2026-01-29 12:06:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:06:34.876269962 +0000 UTC m=+5092.093601106" watchObservedRunningTime="2026-01-29 12:06:34.880349482 +0000 UTC m=+5092.097680616" Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.588880 4852 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/mariadb-client"] Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.590834 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.620548 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.692632 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcfkh\" (UniqueName: \"kubernetes.io/projected/be8b6197-e756-451b-b6b5-b59d1738da2c-kube-api-access-gcfkh\") pod \"mariadb-client\" (UID: \"be8b6197-e756-451b-b6b5-b59d1738da2c\") " pod="openstack/mariadb-client" Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.794711 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcfkh\" (UniqueName: \"kubernetes.io/projected/be8b6197-e756-451b-b6b5-b59d1738da2c-kube-api-access-gcfkh\") pod \"mariadb-client\" (UID: \"be8b6197-e756-451b-b6b5-b59d1738da2c\") " pod="openstack/mariadb-client" Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.831550 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcfkh\" (UniqueName: \"kubernetes.io/projected/be8b6197-e756-451b-b6b5-b59d1738da2c-kube-api-access-gcfkh\") pod \"mariadb-client\" (UID: \"be8b6197-e756-451b-b6b5-b59d1738da2c\") " pod="openstack/mariadb-client" Jan 29 12:06:37 crc kubenswrapper[4852]: I0129 12:06:37.924375 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:38 crc kubenswrapper[4852]: I0129 12:06:38.463857 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:06:38 crc kubenswrapper[4852]: E0129 12:06:38.464402 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:06:38 crc kubenswrapper[4852]: I0129 12:06:38.477909 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:38 crc kubenswrapper[4852]: I0129 12:06:38.912094 4852 generic.go:334] "Generic (PLEG): container finished" podID="be8b6197-e756-451b-b6b5-b59d1738da2c" containerID="7ed555839cc90fb828afcf1fc2303d6ffb0c74ae734b3c086768b4f564f3e551" exitCode=0 Jan 29 12:06:38 crc kubenswrapper[4852]: I0129 12:06:38.912160 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"be8b6197-e756-451b-b6b5-b59d1738da2c","Type":"ContainerDied","Data":"7ed555839cc90fb828afcf1fc2303d6ffb0c74ae734b3c086768b4f564f3e551"} Jan 29 12:06:38 crc kubenswrapper[4852]: I0129 12:06:38.912191 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"be8b6197-e756-451b-b6b5-b59d1738da2c","Type":"ContainerStarted","Data":"075738e0f2a5b239b8e962a01e053cce32cb0f971eff26d8c5c47efcd904d8cd"} Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.243422 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.329734 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_be8b6197-e756-451b-b6b5-b59d1738da2c/mariadb-client/0.log" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.354515 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcfkh\" (UniqueName: \"kubernetes.io/projected/be8b6197-e756-451b-b6b5-b59d1738da2c-kube-api-access-gcfkh\") pod \"be8b6197-e756-451b-b6b5-b59d1738da2c\" (UID: \"be8b6197-e756-451b-b6b5-b59d1738da2c\") " Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.360883 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.362390 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be8b6197-e756-451b-b6b5-b59d1738da2c-kube-api-access-gcfkh" (OuterVolumeSpecName: "kube-api-access-gcfkh") pod "be8b6197-e756-451b-b6b5-b59d1738da2c" (UID: "be8b6197-e756-451b-b6b5-b59d1738da2c"). InnerVolumeSpecName "kube-api-access-gcfkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.367612 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.456961 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcfkh\" (UniqueName: \"kubernetes.io/projected/be8b6197-e756-451b-b6b5-b59d1738da2c-kube-api-access-gcfkh\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.514540 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:40 crc kubenswrapper[4852]: E0129 12:06:40.514904 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be8b6197-e756-451b-b6b5-b59d1738da2c" containerName="mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.514917 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="be8b6197-e756-451b-b6b5-b59d1738da2c" containerName="mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.515053 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="be8b6197-e756-451b-b6b5-b59d1738da2c" containerName="mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.515595 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.525384 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.660161 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j8sb\" (UniqueName: \"kubernetes.io/projected/11a1a1dc-07b4-4f3e-ba96-23142ba36c03-kube-api-access-4j8sb\") pod \"mariadb-client\" (UID: \"11a1a1dc-07b4-4f3e-ba96-23142ba36c03\") " pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.762256 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j8sb\" (UniqueName: \"kubernetes.io/projected/11a1a1dc-07b4-4f3e-ba96-23142ba36c03-kube-api-access-4j8sb\") pod \"mariadb-client\" (UID: \"11a1a1dc-07b4-4f3e-ba96-23142ba36c03\") " pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.785512 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j8sb\" (UniqueName: \"kubernetes.io/projected/11a1a1dc-07b4-4f3e-ba96-23142ba36c03-kube-api-access-4j8sb\") pod \"mariadb-client\" (UID: \"11a1a1dc-07b4-4f3e-ba96-23142ba36c03\") " pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.842665 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.933751 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="075738e0f2a5b239b8e962a01e053cce32cb0f971eff26d8c5c47efcd904d8cd" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.933831 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:40 crc kubenswrapper[4852]: I0129 12:06:40.954516 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="be8b6197-e756-451b-b6b5-b59d1738da2c" podUID="11a1a1dc-07b4-4f3e-ba96-23142ba36c03" Jan 29 12:06:41 crc kubenswrapper[4852]: I0129 12:06:41.278317 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:41 crc kubenswrapper[4852]: W0129 12:06:41.279600 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11a1a1dc_07b4_4f3e_ba96_23142ba36c03.slice/crio-61c253ce75a070bf6688b7edcccbb6b4c7d86e50eac82ef079bca809678f2bb6 WatchSource:0}: Error finding container 61c253ce75a070bf6688b7edcccbb6b4c7d86e50eac82ef079bca809678f2bb6: Status 404 returned error can't find the container with id 61c253ce75a070bf6688b7edcccbb6b4c7d86e50eac82ef079bca809678f2bb6 Jan 29 12:06:41 crc kubenswrapper[4852]: I0129 12:06:41.479084 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be8b6197-e756-451b-b6b5-b59d1738da2c" path="/var/lib/kubelet/pods/be8b6197-e756-451b-b6b5-b59d1738da2c/volumes" Jan 29 12:06:41 crc kubenswrapper[4852]: I0129 12:06:41.963972 4852 generic.go:334] "Generic (PLEG): container finished" podID="11a1a1dc-07b4-4f3e-ba96-23142ba36c03" containerID="7e0f252eaea396b66378a832448e8c7fc86883da3bf78529e4176c30e26251f7" exitCode=0 Jan 29 12:06:41 crc kubenswrapper[4852]: I0129 12:06:41.964062 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"11a1a1dc-07b4-4f3e-ba96-23142ba36c03","Type":"ContainerDied","Data":"7e0f252eaea396b66378a832448e8c7fc86883da3bf78529e4176c30e26251f7"} Jan 29 12:06:41 crc kubenswrapper[4852]: I0129 12:06:41.964095 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"11a1a1dc-07b4-4f3e-ba96-23142ba36c03","Type":"ContainerStarted","Data":"61c253ce75a070bf6688b7edcccbb6b4c7d86e50eac82ef079bca809678f2bb6"} Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.299105 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.323674 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_11a1a1dc-07b4-4f3e-ba96-23142ba36c03/mariadb-client/0.log" Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.355379 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.360894 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.410235 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4j8sb\" (UniqueName: \"kubernetes.io/projected/11a1a1dc-07b4-4f3e-ba96-23142ba36c03-kube-api-access-4j8sb\") pod \"11a1a1dc-07b4-4f3e-ba96-23142ba36c03\" (UID: \"11a1a1dc-07b4-4f3e-ba96-23142ba36c03\") " Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.422509 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a1a1dc-07b4-4f3e-ba96-23142ba36c03-kube-api-access-4j8sb" (OuterVolumeSpecName: "kube-api-access-4j8sb") pod "11a1a1dc-07b4-4f3e-ba96-23142ba36c03" (UID: "11a1a1dc-07b4-4f3e-ba96-23142ba36c03"). 
InnerVolumeSpecName "kube-api-access-4j8sb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.487191 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11a1a1dc-07b4-4f3e-ba96-23142ba36c03" path="/var/lib/kubelet/pods/11a1a1dc-07b4-4f3e-ba96-23142ba36c03/volumes" Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.523125 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4j8sb\" (UniqueName: \"kubernetes.io/projected/11a1a1dc-07b4-4f3e-ba96-23142ba36c03-kube-api-access-4j8sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.982722 4852 scope.go:117] "RemoveContainer" containerID="7e0f252eaea396b66378a832448e8c7fc86883da3bf78529e4176c30e26251f7" Jan 29 12:06:43 crc kubenswrapper[4852]: I0129 12:06:43.982790 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 12:06:49 crc kubenswrapper[4852]: I0129 12:06:49.243948 4852 scope.go:117] "RemoveContainer" containerID="c17b4aa02b83e31f0b8b187f47a1c97f7f1d39964d1364cf1e0a05f2845e3e4f" Jan 29 12:06:53 crc kubenswrapper[4852]: I0129 12:06:53.473156 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:06:53 crc kubenswrapper[4852]: E0129 12:06:53.474382 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:07:07 crc kubenswrapper[4852]: I0129 12:07:07.463563 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:07:07 crc kubenswrapper[4852]: E0129 12:07:07.464483 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:07:20 crc kubenswrapper[4852]: I0129 12:07:20.464029 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:07:20 crc kubenswrapper[4852]: E0129 12:07:20.464889 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:07:32 crc kubenswrapper[4852]: I0129 12:07:32.463798 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:07:32 crc kubenswrapper[4852]: E0129 12:07:32.464730 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:07:46 crc kubenswrapper[4852]: I0129 12:07:46.463991 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:07:46 crc kubenswrapper[4852]: E0129 12:07:46.464809 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:08:00 crc kubenswrapper[4852]: I0129 12:08:00.464061 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:08:00 crc kubenswrapper[4852]: E0129 12:08:00.465088 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:08:13 crc kubenswrapper[4852]: I0129 12:08:13.474409 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:08:13 crc kubenswrapper[4852]: E0129 12:08:13.475767 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:08:27 crc kubenswrapper[4852]: I0129 12:08:27.464109 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:08:27 crc kubenswrapper[4852]: E0129 12:08:27.465222 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:08:42 crc kubenswrapper[4852]: I0129 12:08:42.464557 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:08:42 crc kubenswrapper[4852]: E0129 12:08:42.465851 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:08:56 crc kubenswrapper[4852]: I0129 12:08:56.464428 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:08:56 crc kubenswrapper[4852]: E0129 12:08:56.465940 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.379889 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 12:09:01 crc kubenswrapper[4852]: E0129 12:09:01.381213 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a1a1dc-07b4-4f3e-ba96-23142ba36c03" containerName="mariadb-client" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.381253 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a1a1dc-07b4-4f3e-ba96-23142ba36c03" containerName="mariadb-client" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.381672 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="11a1a1dc-07b4-4f3e-ba96-23142ba36c03" containerName="mariadb-client" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.383126 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.386402 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.387027 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.387431 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-p4cw6" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.405789 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.424288 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.426138 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.436623 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.438690 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.448670 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.459327 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.573528 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.573912 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/599ccb34-f477-4540-a025-c0dd9cde5861-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574037 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599ccb34-f477-4540-a025-c0dd9cde5861-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574194 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpvkk\" (UniqueName: \"kubernetes.io/projected/a7067e5b-383f-4726-af68-1011d2bed65f-kube-api-access-fpvkk\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574318 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a7067e5b-383f-4726-af68-1011d2bed65f-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574435 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5582ad6-370b-4202-964b-511f90ec4b23-config\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574552 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/599ccb34-f477-4540-a025-c0dd9cde5861-config\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574707 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a7067e5b-383f-4726-af68-1011d2bed65f-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574843 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7067e5b-383f-4726-af68-1011d2bed65f-config\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.574959 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5582ad6-370b-4202-964b-511f90ec4b23-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.575063 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng7kh\" (UniqueName: \"kubernetes.io/projected/599ccb34-f477-4540-a025-c0dd9cde5861-kube-api-access-ng7kh\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.575184 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7067e5b-383f-4726-af68-1011d2bed65f-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.575519 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5582ad6-370b-4202-964b-511f90ec4b23-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.575657 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d5582ad6-370b-4202-964b-511f90ec4b23-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.575856 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-234271b7-60b0-4b30-9569-dd1024138b6d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-234271b7-60b0-4b30-9569-dd1024138b6d\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.576368 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr5bv\" (UniqueName: \"kubernetes.io/projected/d5582ad6-370b-4202-964b-511f90ec4b23-kube-api-access-tr5bv\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.576504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4b977a2d-5966-486d-9170-67e4c080fc37\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b977a2d-5966-486d-9170-67e4c080fc37\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.576653 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/599ccb34-f477-4540-a025-c0dd9cde5861-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.578234 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.579557 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.581728 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.581924 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.585404 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.587039 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.587576 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-n7nsj" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.594351 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.601715 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.603454 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.612291 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.618744 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677632 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5582ad6-370b-4202-964b-511f90ec4b23-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677678 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d5582ad6-370b-4202-964b-511f90ec4b23-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677726 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ccd95a41-97ff-4038-9343-17bb0198af4f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677747 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccd95a41-97ff-4038-9343-17bb0198af4f-config\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677778 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-234271b7-60b0-4b30-9569-dd1024138b6d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-234271b7-60b0-4b30-9569-dd1024138b6d\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677895 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr5bv\" (UniqueName: \"kubernetes.io/projected/d5582ad6-370b-4202-964b-511f90ec4b23-kube-api-access-tr5bv\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677923 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4b977a2d-5966-486d-9170-67e4c080fc37\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b977a2d-5966-486d-9170-67e4c080fc37\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.677946 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccd95a41-97ff-4038-9343-17bb0198af4f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678156 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678185 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/599ccb34-f477-4540-a025-c0dd9cde5861-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678216 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678389 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d5582ad6-370b-4202-964b-511f90ec4b23-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678446 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678475 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678494 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/599ccb34-f477-4540-a025-c0dd9cde5861-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678513 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-config\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678551 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599ccb34-f477-4540-a025-c0dd9cde5861-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678573 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grqht\" (UniqueName: \"kubernetes.io/projected/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-kube-api-access-grqht\") pod \"ovsdbserver-sb-2\" 
(UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678634 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ccd95a41-97ff-4038-9343-17bb0198af4f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678663 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678728 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpvkk\" (UniqueName: \"kubernetes.io/projected/a7067e5b-383f-4726-af68-1011d2bed65f-kube-api-access-fpvkk\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678746 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a7067e5b-383f-4726-af68-1011d2bed65f-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678784 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5582ad6-370b-4202-964b-511f90ec4b23-config\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678804 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfwb6\" (UniqueName: \"kubernetes.io/projected/ccd95a41-97ff-4038-9343-17bb0198af4f-kube-api-access-lfwb6\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678831 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/599ccb34-f477-4540-a025-c0dd9cde5861-config\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678875 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a7067e5b-383f-4726-af68-1011d2bed65f-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678904 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7067e5b-383f-4726-af68-1011d2bed65f-config\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678945 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/d5582ad6-370b-4202-964b-511f90ec4b23-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.678964 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng7kh\" (UniqueName: \"kubernetes.io/projected/599ccb34-f477-4540-a025-c0dd9cde5861-kube-api-access-ng7kh\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.679182 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.679206 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7067e5b-383f-4726-af68-1011d2bed65f-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.679221 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/599ccb34-f477-4540-a025-c0dd9cde5861-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.680173 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a7067e5b-383f-4726-af68-1011d2bed65f-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.680275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5582ad6-370b-4202-964b-511f90ec4b23-config\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.680302 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/599ccb34-f477-4540-a025-c0dd9cde5861-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.680573 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/599ccb34-f477-4540-a025-c0dd9cde5861-config\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.682010 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5582ad6-370b-4202-964b-511f90ec4b23-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.682053 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a7067e5b-383f-4726-af68-1011d2bed65f-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.682460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7067e5b-383f-4726-af68-1011d2bed65f-config\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.688793 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7067e5b-383f-4726-af68-1011d2bed65f-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.689107 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599ccb34-f477-4540-a025-c0dd9cde5861-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.690332 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5582ad6-370b-4202-964b-511f90ec4b23-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.695379 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.695422 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-234271b7-60b0-4b30-9569-dd1024138b6d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-234271b7-60b0-4b30-9569-dd1024138b6d\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7a327d6d2cdb0ceaf7d468b4b38d388c682f369b106e7faf2819a5663fc24a57/globalmount\"" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.695484 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.695379 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.695524 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4b977a2d-5966-486d-9170-67e4c080fc37\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b977a2d-5966-486d-9170-67e4c080fc37\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/23218cd27bf4b67e2b2bf5d9943c0ead4f8d9479fa0c793232268af50f366d98/globalmount\"" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.695521 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/edac135a3b67f3eeaa15bfa11375f32ccdd3aca9e802eb7b0263b9e85898f2b8/globalmount\"" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.700161 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng7kh\" (UniqueName: \"kubernetes.io/projected/599ccb34-f477-4540-a025-c0dd9cde5861-kube-api-access-ng7kh\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.702134 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpvkk\" (UniqueName: \"kubernetes.io/projected/a7067e5b-383f-4726-af68-1011d2bed65f-kube-api-access-fpvkk\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.703755 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr5bv\" (UniqueName: \"kubernetes.io/projected/d5582ad6-370b-4202-964b-511f90ec4b23-kube-api-access-tr5bv\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.734527 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4b977a2d-5966-486d-9170-67e4c080fc37\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b977a2d-5966-486d-9170-67e4c080fc37\") pod \"ovsdbserver-nb-2\" (UID: \"599ccb34-f477-4540-a025-c0dd9cde5861\") " pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.743460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-234271b7-60b0-4b30-9569-dd1024138b6d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-234271b7-60b0-4b30-9569-dd1024138b6d\") pod \"ovsdbserver-nb-0\" (UID: \"d5582ad6-370b-4202-964b-511f90ec4b23\") " pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.748504 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-51c97c51-49fd-4347-bbf6-85d19538db5a\") pod \"ovsdbserver-nb-1\" (UID: \"a7067e5b-383f-4726-af68-1011d2bed65f\") " pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.765685 4852 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.774448 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781230 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781302 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781331 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781410 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-config\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781477 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ccd95a41-97ff-4038-9343-17bb0198af4f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781508 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccd95a41-97ff-4038-9343-17bb0198af4f-config\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781560 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccd95a41-97ff-4038-9343-17bb0198af4f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781621 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781666 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781707 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781747 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6r9z\" (UniqueName: \"kubernetes.io/projected/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-kube-api-access-p6r9z\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781786 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-config\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781819 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grqht\" (UniqueName: \"kubernetes.io/projected/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-kube-api-access-grqht\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781852 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ccd95a41-97ff-4038-9343-17bb0198af4f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781881 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781925 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.781960 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfwb6\" (UniqueName: \"kubernetes.io/projected/ccd95a41-97ff-4038-9343-17bb0198af4f-kube-api-access-lfwb6\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc 
kubenswrapper[4852]: I0129 12:09:01.784176 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-config\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.787750 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ccd95a41-97ff-4038-9343-17bb0198af4f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.788391 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ccd95a41-97ff-4038-9343-17bb0198af4f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.788898 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.790747 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.791119 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.791302 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.791372 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/add376bec1a816910c6c6234e7ae0f85b3e9bde0add3cb31bd098bebfa959d91/globalmount\"" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.797440 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccd95a41-97ff-4038-9343-17bb0198af4f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.798917 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccd95a41-97ff-4038-9343-17bb0198af4f-config\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.801813 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.801871 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5125ec24e0b6fcc12b6b5633bc8cd230f0c9b2b7f6fb64ad6f98844c27fc97df/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.802176 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfwb6\" (UniqueName: \"kubernetes.io/projected/ccd95a41-97ff-4038-9343-17bb0198af4f-kube-api-access-lfwb6\") pod \"ovsdbserver-sb-0\" (UID: \"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.807847 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grqht\" (UniqueName: \"kubernetes.io/projected/29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9-kube-api-access-grqht\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.836440 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-caaf0ae8-7fbc-4e98-ae7b-4bb24554053d\") pod \"ovsdbserver-sb-2\" (UID: \"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9\") " pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.841815 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8db1f215-916b-40ef-bdbd-b43882ed69b3\") pod \"ovsdbserver-sb-0\" (UID: 
\"ccd95a41-97ff-4038-9343-17bb0198af4f\") " pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.884698 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6r9z\" (UniqueName: \"kubernetes.io/projected/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-kube-api-access-p6r9z\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.884810 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.884915 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.884940 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.884971 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.885124 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-config\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.885869 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.886543 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-config\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.887821 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.896956 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.896996 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7c056b6da1e948d1e1b781c3b2f18b61a3e10aee95e49587f7791b44ffdd01c6/globalmount\"" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.897916 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.905987 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6r9z\" (UniqueName: \"kubernetes.io/projected/52bbe7af-9fd8-4653-b05c-edd036fc3c9e-kube-api-access-p6r9z\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.912861 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.923745 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:01 crc kubenswrapper[4852]: I0129 12:09:01.953079 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b076c6ea-00da-4524-9347-e0bd1c8c90c9\") pod \"ovsdbserver-sb-1\" (UID: \"52bbe7af-9fd8-4653-b05c-edd036fc3c9e\") " pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:02 crc kubenswrapper[4852]: I0129 12:09:02.024174 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:02.235179 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:02.326522 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:02.397830 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 29 12:09:03 crc kubenswrapper[4852]: W0129 12:09:02.402284 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7067e5b_383f_4726_af68_1011d2bed65f.slice/crio-cb9c13c85b912f42587c9f5d4c8838e392257ebbb8e742e0275db2fff83d4cc1 WatchSource:0}: Error finding container cb9c13c85b912f42587c9f5d4c8838e392257ebbb8e742e0275db2fff83d4cc1: Status 404 returned error can't find the container with id cb9c13c85b912f42587c9f5d4c8838e392257ebbb8e742e0275db2fff83d4cc1 Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.284673 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"a7067e5b-383f-4726-af68-1011d2bed65f","Type":"ContainerStarted","Data":"56785b3a9a6c5266afffad7cc739d28989d589ac3dc83ee7d2fa2c1796d85df8"} Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.285012 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"a7067e5b-383f-4726-af68-1011d2bed65f","Type":"ContainerStarted","Data":"e7c0f5a3f532c7b76d57ff2ebe0b8abbf50f1d3d09989d62fb98c976a3f1c642"} Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.285024 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"a7067e5b-383f-4726-af68-1011d2bed65f","Type":"ContainerStarted","Data":"cb9c13c85b912f42587c9f5d4c8838e392257ebbb8e742e0275db2fff83d4cc1"} Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.287333 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"599ccb34-f477-4540-a025-c0dd9cde5861","Type":"ContainerStarted","Data":"059da7dc59cfb1ea1fb5d0389ebca93eeb010dab76840b4c0dc8f8b68e06c3ce"} Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.287380 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"599ccb34-f477-4540-a025-c0dd9cde5861","Type":"ContainerStarted","Data":"b5f6d7ef3b7ce43253b1520fd0da2e2d3fab359ddf901092f433f8fc4e8e4b68"} Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.287397 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"599ccb34-f477-4540-a025-c0dd9cde5861","Type":"ContainerStarted","Data":"4a5f6d202d52fc38c6d4e02221bfd39fce27be39ecd0be3d285752def5464ed1"} Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.304448 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.304426808 podStartE2EDuration="3.304426808s" podCreationTimestamp="2026-01-29 12:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:03.301062255 +0000 UTC m=+5240.518393389" watchObservedRunningTime="2026-01-29 12:09:03.304426808 +0000 UTC m=+5240.521757942" Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.555861 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.555834408 podStartE2EDuration="3.555834408s" podCreationTimestamp="2026-01-29 
12:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:03.322236521 +0000 UTC m=+5240.539567655" watchObservedRunningTime="2026-01-29 12:09:03.555834408 +0000 UTC m=+5240.773165542" Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.559202 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 12:09:03 crc kubenswrapper[4852]: W0129 12:09:03.564858 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podccd95a41_97ff_4038_9343_17bb0198af4f.slice/crio-c4063fa2d8d744ff145a21c365160352e035e27a3ac1731021e9a41122515520 WatchSource:0}: Error finding container c4063fa2d8d744ff145a21c365160352e035e27a3ac1731021e9a41122515520: Status 404 returned error can't find the container with id c4063fa2d8d744ff145a21c365160352e035e27a3ac1731021e9a41122515520 Jan 29 12:09:03 crc kubenswrapper[4852]: I0129 12:09:03.653106 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.159550 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 29 12:09:04 crc kubenswrapper[4852]: W0129 12:09:04.166419 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29ba0ffa_0a78_4ef9_82cf_7230e36cf3c9.slice/crio-94afab6bd85293ec1c8bedead1cd4f211c4a6e2b06316cd00da8982969607688 WatchSource:0}: Error finding container 94afab6bd85293ec1c8bedead1cd4f211c4a6e2b06316cd00da8982969607688: Status 404 returned error can't find the container with id 94afab6bd85293ec1c8bedead1cd4f211c4a6e2b06316cd00da8982969607688 Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.267625 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 12:09:04 crc kubenswrapper[4852]: W0129 12:09:04.269268 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5582ad6_370b_4202_964b_511f90ec4b23.slice/crio-5cae2e7415be9d6b48b40ba0568529ab1405be367f80b6d2e89fdd35382a2db0 WatchSource:0}: Error finding container 5cae2e7415be9d6b48b40ba0568529ab1405be367f80b6d2e89fdd35382a2db0: Status 404 returned error can't find the container with id 5cae2e7415be9d6b48b40ba0568529ab1405be367f80b6d2e89fdd35382a2db0 Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.306538 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9","Type":"ContainerStarted","Data":"94afab6bd85293ec1c8bedead1cd4f211c4a6e2b06316cd00da8982969607688"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.309193 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ccd95a41-97ff-4038-9343-17bb0198af4f","Type":"ContainerStarted","Data":"67676a8e8c20389485db478ede8088f83b41aecad8c23eaaeb8091575f941571"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.309241 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ccd95a41-97ff-4038-9343-17bb0198af4f","Type":"ContainerStarted","Data":"da2d978da44e50ec9083f6b84efe372b48294f136e6777d9eb8132a95b3513fe"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.309251 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-sb-0" event={"ID":"ccd95a41-97ff-4038-9343-17bb0198af4f","Type":"ContainerStarted","Data":"c4063fa2d8d744ff145a21c365160352e035e27a3ac1731021e9a41122515520"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.312009 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"52bbe7af-9fd8-4653-b05c-edd036fc3c9e","Type":"ContainerStarted","Data":"4c61d66211df259715b133121abbd439683ee37eab7869bfbf536fc20dfa38e2"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.312038 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"52bbe7af-9fd8-4653-b05c-edd036fc3c9e","Type":"ContainerStarted","Data":"f67d6e68387a1f574fb8499b52ed831c3e9cde61aa5d6d69f930fda1bdae3dd6"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.312049 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"52bbe7af-9fd8-4653-b05c-edd036fc3c9e","Type":"ContainerStarted","Data":"eb385c0e3efbabb553ed0a43abd178eba80a7eda13656de062956bd107c2a732"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.313535 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d5582ad6-370b-4202-964b-511f90ec4b23","Type":"ContainerStarted","Data":"5cae2e7415be9d6b48b40ba0568529ab1405be367f80b6d2e89fdd35382a2db0"} Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.339529 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=4.339504258 podStartE2EDuration="4.339504258s" podCreationTimestamp="2026-01-29 12:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:04.3305115 +0000 UTC m=+5241.547842644" watchObservedRunningTime="2026-01-29 12:09:04.339504258 +0000 UTC m=+5241.556835412" Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.346243 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=4.346228573 podStartE2EDuration="4.346228573s" podCreationTimestamp="2026-01-29 12:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:04.344865239 +0000 UTC m=+5241.562196373" watchObservedRunningTime="2026-01-29 12:09:04.346228573 +0000 UTC m=+5241.563559707" Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.766555 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.774700 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:04 crc kubenswrapper[4852]: I0129 12:09:04.913997 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 12:09:05.235259 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 12:09:05.328276 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9","Type":"ContainerStarted","Data":"76c5734c735e45c746733ecb66f0aaf2cc22d4078651c56c109a233c35132856"} Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 
12:09:05.328372 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9","Type":"ContainerStarted","Data":"04d40383cef869c6d325c1915dc70cd55227c76c04ca686553a67092b448c418"} Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 12:09:05.331699 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d5582ad6-370b-4202-964b-511f90ec4b23","Type":"ContainerStarted","Data":"d1d79427358f64147cf150ede4b99a57493060b95e785e2230476b15f902e479"} Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 12:09:05.331753 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d5582ad6-370b-4202-964b-511f90ec4b23","Type":"ContainerStarted","Data":"0bfcecc7941916ce8972796761aa1d407ec23181419f9bca754b982540d2b44f"} Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 12:09:05.359177 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=5.359156173 podStartE2EDuration="5.359156173s" podCreationTimestamp="2026-01-29 12:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:05.351834515 +0000 UTC m=+5242.569165679" watchObservedRunningTime="2026-01-29 12:09:05.359156173 +0000 UTC m=+5242.576487327" Jan 29 12:09:05 crc kubenswrapper[4852]: I0129 12:09:05.392224 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=5.392206509 podStartE2EDuration="5.392206509s" podCreationTimestamp="2026-01-29 12:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:05.385557987 +0000 UTC m=+5242.602889121" watchObservedRunningTime="2026-01-29 12:09:05.392206509 +0000 UTC m=+5242.609537643" Jan 29 12:09:06 crc kubenswrapper[4852]: I0129 12:09:06.766420 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:06 crc kubenswrapper[4852]: I0129 12:09:06.775510 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:06 crc kubenswrapper[4852]: I0129 12:09:06.913098 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:06 crc kubenswrapper[4852]: I0129 12:09:06.924896 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.025495 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.235383 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.833647 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.834074 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.874029 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.885670 4852 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Jan 29 12:09:07 crc kubenswrapper[4852]: I0129 12:09:07.924756 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.025529 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.119517 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.123635 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.128729 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.228345 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7977bd746f-p7xjc"] Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.230137 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.232145 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.241849 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7977bd746f-p7xjc"] Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.321473 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.402497 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.403297 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-ovsdbserver-nb\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.403368 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-config\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.403463 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-dns-svc\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.403538 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mn2n\" (UniqueName: \"kubernetes.io/projected/65c8b086-f422-4a96-bddc-d1193696183c-kube-api-access-8mn2n\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " 
pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.406270 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.518528 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-ovsdbserver-nb\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.518664 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-config\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.518775 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-dns-svc\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.518926 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mn2n\" (UniqueName: \"kubernetes.io/projected/65c8b086-f422-4a96-bddc-d1193696183c-kube-api-access-8mn2n\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.519885 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-config\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.519948 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-ovsdbserver-nb\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.520518 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-dns-svc\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.572684 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mn2n\" (UniqueName: \"kubernetes.io/projected/65c8b086-f422-4a96-bddc-d1193696183c-kube-api-access-8mn2n\") pod \"dnsmasq-dns-7977bd746f-p7xjc\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.633296 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7977bd746f-p7xjc"] Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.634169 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.670689 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-644c8fb5bc-8rrgm"] Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.672050 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.676604 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.681304 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-644c8fb5bc-8rrgm"] Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.825232 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k6j2\" (UniqueName: \"kubernetes.io/projected/1033639a-9c7a-4de6-a288-1568c2a20f2e-kube-api-access-4k6j2\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.825309 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-nb\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.825340 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-config\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.825394 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-dns-svc\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.825569 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-sb\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.927099 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-sb\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.927172 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k6j2\" (UniqueName: \"kubernetes.io/projected/1033639a-9c7a-4de6-a288-1568c2a20f2e-kube-api-access-4k6j2\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " 
pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.927197 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-nb\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.927218 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-config\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.927249 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-dns-svc\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.928184 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-dns-svc\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.928209 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-nb\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.928524 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-config\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.929144 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-sb\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:08 crc kubenswrapper[4852]: I0129 12:09:08.943366 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k6j2\" (UniqueName: \"kubernetes.io/projected/1033639a-9c7a-4de6-a288-1568c2a20f2e-kube-api-access-4k6j2\") pod \"dnsmasq-dns-644c8fb5bc-8rrgm\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:09 crc kubenswrapper[4852]: I0129 12:09:09.042706 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:09 crc kubenswrapper[4852]: I0129 12:09:09.109523 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7977bd746f-p7xjc"] Jan 29 12:09:09 crc kubenswrapper[4852]: W0129 12:09:09.118904 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65c8b086_f422_4a96_bddc_d1193696183c.slice/crio-5229406b3776363eee4c2a5841dd0d6477d42c3e8c6cca66d48ceb62aec270b8 WatchSource:0}: Error finding container 5229406b3776363eee4c2a5841dd0d6477d42c3e8c6cca66d48ceb62aec270b8: Status 404 returned error can't find the container with id 5229406b3776363eee4c2a5841dd0d6477d42c3e8c6cca66d48ceb62aec270b8 Jan 29 12:09:09 crc kubenswrapper[4852]: I0129 12:09:09.380631 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" event={"ID":"65c8b086-f422-4a96-bddc-d1193696183c","Type":"ContainerStarted","Data":"5229406b3776363eee4c2a5841dd0d6477d42c3e8c6cca66d48ceb62aec270b8"} Jan 29 12:09:09 crc kubenswrapper[4852]: I0129 12:09:09.428555 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 29 12:09:09 crc kubenswrapper[4852]: I0129 12:09:09.467873 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Jan 29 12:09:09 crc kubenswrapper[4852]: I0129 12:09:09.533288 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-644c8fb5bc-8rrgm"] Jan 29 12:09:09 crc kubenswrapper[4852]: W0129 12:09:09.541336 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1033639a_9c7a_4de6_a288_1568c2a20f2e.slice/crio-9f21da53ac7bb020687e733d7a01a2994c0db1a282b66088c5f279faf76bed5d WatchSource:0}: Error finding container 9f21da53ac7bb020687e733d7a01a2994c0db1a282b66088c5f279faf76bed5d: Status 404 returned error can't find the container with id 9f21da53ac7bb020687e733d7a01a2994c0db1a282b66088c5f279faf76bed5d Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.396741 4852 generic.go:334] "Generic (PLEG): container finished" podID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerID="237e3545aa9be491737feea60f676c2e45f559d8b396795f19e164fed175256b" exitCode=0 Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.396798 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" event={"ID":"1033639a-9c7a-4de6-a288-1568c2a20f2e","Type":"ContainerDied","Data":"237e3545aa9be491737feea60f676c2e45f559d8b396795f19e164fed175256b"} Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.397082 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" event={"ID":"1033639a-9c7a-4de6-a288-1568c2a20f2e","Type":"ContainerStarted","Data":"9f21da53ac7bb020687e733d7a01a2994c0db1a282b66088c5f279faf76bed5d"} Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.400424 4852 generic.go:334] "Generic (PLEG): container finished" podID="65c8b086-f422-4a96-bddc-d1193696183c" containerID="247e0621803b599125dd78e60c7e725bb6302089bc9470071941811976fa3340" exitCode=0 Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.400489 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" 
event={"ID":"65c8b086-f422-4a96-bddc-d1193696183c","Type":"ContainerDied","Data":"247e0621803b599125dd78e60c7e725bb6302089bc9470071941811976fa3340"} Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.463356 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:09:10 crc kubenswrapper[4852]: E0129 12:09:10.463854 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.702463 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.860637 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mn2n\" (UniqueName: \"kubernetes.io/projected/65c8b086-f422-4a96-bddc-d1193696183c-kube-api-access-8mn2n\") pod \"65c8b086-f422-4a96-bddc-d1193696183c\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.860965 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-dns-svc\") pod \"65c8b086-f422-4a96-bddc-d1193696183c\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.862099 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-config\") pod \"65c8b086-f422-4a96-bddc-d1193696183c\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.862177 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-ovsdbserver-nb\") pod \"65c8b086-f422-4a96-bddc-d1193696183c\" (UID: \"65c8b086-f422-4a96-bddc-d1193696183c\") " Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.876240 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65c8b086-f422-4a96-bddc-d1193696183c-kube-api-access-8mn2n" (OuterVolumeSpecName: "kube-api-access-8mn2n") pod "65c8b086-f422-4a96-bddc-d1193696183c" (UID: "65c8b086-f422-4a96-bddc-d1193696183c"). InnerVolumeSpecName "kube-api-access-8mn2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.885911 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-config" (OuterVolumeSpecName: "config") pod "65c8b086-f422-4a96-bddc-d1193696183c" (UID: "65c8b086-f422-4a96-bddc-d1193696183c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.897969 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "65c8b086-f422-4a96-bddc-d1193696183c" (UID: "65c8b086-f422-4a96-bddc-d1193696183c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.899368 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "65c8b086-f422-4a96-bddc-d1193696183c" (UID: "65c8b086-f422-4a96-bddc-d1193696183c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.964686 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.964995 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mn2n\" (UniqueName: \"kubernetes.io/projected/65c8b086-f422-4a96-bddc-d1193696183c-kube-api-access-8mn2n\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.965008 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:10 crc kubenswrapper[4852]: I0129 12:09:10.965023 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65c8b086-f422-4a96-bddc-d1193696183c-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.412684 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" event={"ID":"65c8b086-f422-4a96-bddc-d1193696183c","Type":"ContainerDied","Data":"5229406b3776363eee4c2a5841dd0d6477d42c3e8c6cca66d48ceb62aec270b8"} Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.412742 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7977bd746f-p7xjc" Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.412777 4852 scope.go:117] "RemoveContainer" containerID="247e0621803b599125dd78e60c7e725bb6302089bc9470071941811976fa3340" Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.415650 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" event={"ID":"1033639a-9c7a-4de6-a288-1568c2a20f2e","Type":"ContainerStarted","Data":"6061cae9d3ce13cadf92a7764d9f6662d4b90ba9ba7e1d53d15e19461607905a"} Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.415943 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.442099 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" podStartSLOduration=3.442072719 podStartE2EDuration="3.442072719s" podCreationTimestamp="2026-01-29 12:09:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:11.439249471 +0000 UTC m=+5248.656580655" watchObservedRunningTime="2026-01-29 12:09:11.442072719 +0000 UTC m=+5248.659403853" Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.513613 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7977bd746f-p7xjc"] Jan 29 12:09:11 crc kubenswrapper[4852]: I0129 12:09:11.520970 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7977bd746f-p7xjc"] Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.663071 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Jan 29 12:09:12 crc kubenswrapper[4852]: E0129 12:09:12.663823 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65c8b086-f422-4a96-bddc-d1193696183c" containerName="init" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.663843 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="65c8b086-f422-4a96-bddc-d1193696183c" containerName="init" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.664049 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="65c8b086-f422-4a96-bddc-d1193696183c" containerName="init" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.664713 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.669296 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.675850 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.801235 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d7127355-e659-41e0-adcc-6acf78ace48c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.801351 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d8fdece6-3b51-4de5-88f4-f32be06904da-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.801437 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptxhn\" (UniqueName: \"kubernetes.io/projected/d8fdece6-3b51-4de5-88f4-f32be06904da-kube-api-access-ptxhn\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.903152 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d7127355-e659-41e0-adcc-6acf78ace48c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.903331 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d8fdece6-3b51-4de5-88f4-f32be06904da-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.903392 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptxhn\" (UniqueName: \"kubernetes.io/projected/d8fdece6-3b51-4de5-88f4-f32be06904da-kube-api-access-ptxhn\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.909391 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.909442 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d7127355-e659-41e0-adcc-6acf78ace48c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9d4f2e215a182ae5cd455ffe8dcaa65ae5818c05153cbe10f4e78892639d79aa/globalmount\"" pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.910710 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d8fdece6-3b51-4de5-88f4-f32be06904da-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.921646 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptxhn\" (UniqueName: \"kubernetes.io/projected/d8fdece6-3b51-4de5-88f4-f32be06904da-kube-api-access-ptxhn\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.948700 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d7127355-e659-41e0-adcc-6acf78ace48c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") pod \"ovn-copy-data\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " pod="openstack/ovn-copy-data" Jan 29 12:09:12 crc kubenswrapper[4852]: I0129 12:09:12.988067 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Jan 29 12:09:13 crc kubenswrapper[4852]: I0129 12:09:13.478552 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65c8b086-f422-4a96-bddc-d1193696183c" path="/var/lib/kubelet/pods/65c8b086-f422-4a96-bddc-d1193696183c/volumes" Jan 29 12:09:13 crc kubenswrapper[4852]: I0129 12:09:13.500066 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 29 12:09:13 crc kubenswrapper[4852]: W0129 12:09:13.503893 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8fdece6_3b51_4de5_88f4_f32be06904da.slice/crio-896dd0e82fa371cf9a548496fc86b05934940b95fe9e2114c4f40e789cca0159 WatchSource:0}: Error finding container 896dd0e82fa371cf9a548496fc86b05934940b95fe9e2114c4f40e789cca0159: Status 404 returned error can't find the container with id 896dd0e82fa371cf9a548496fc86b05934940b95fe9e2114c4f40e789cca0159 Jan 29 12:09:14 crc kubenswrapper[4852]: I0129 12:09:14.452752 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d8fdece6-3b51-4de5-88f4-f32be06904da","Type":"ContainerStarted","Data":"2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3"} Jan 29 12:09:14 crc kubenswrapper[4852]: I0129 12:09:14.453155 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d8fdece6-3b51-4de5-88f4-f32be06904da","Type":"ContainerStarted","Data":"896dd0e82fa371cf9a548496fc86b05934940b95fe9e2114c4f40e789cca0159"} Jan 29 12:09:14 crc kubenswrapper[4852]: I0129 12:09:14.487577 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.487521385 podStartE2EDuration="3.487521385s" podCreationTimestamp="2026-01-29 12:09:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:14.47913495 +0000 UTC m=+5251.696466094" watchObservedRunningTime="2026-01-29 12:09:14.487521385 +0000 UTC m=+5251.704852569" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.045818 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.118117 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-q7qjb"] Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.118340 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerName="dnsmasq-dns" containerID="cri-o://051f0ebce9efe5fa0095e62c13ba255e9c9c6e1ef4c3885aedbe6eb22bbf8b57" gracePeriod=10 Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.520170 4852 generic.go:334] "Generic (PLEG): container finished" podID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerID="051f0ebce9efe5fa0095e62c13ba255e9c9c6e1ef4c3885aedbe6eb22bbf8b57" exitCode=0 Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.520249 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" event={"ID":"84413885-86dc-4801-bb4f-e6a0e88875d0","Type":"ContainerDied","Data":"051f0ebce9efe5fa0095e62c13ba255e9c9c6e1ef4c3885aedbe6eb22bbf8b57"} Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.657543 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.680224 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 29 12:09:19 crc kubenswrapper[4852]: E0129 12:09:19.680640 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerName="init" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.680659 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerName="init" Jan 29 12:09:19 crc kubenswrapper[4852]: E0129 12:09:19.680678 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerName="dnsmasq-dns" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.680687 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerName="dnsmasq-dns" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.680887 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" containerName="dnsmasq-dns" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.681821 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.685422 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.685455 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.686029 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-qwl4j" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.695987 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.720018 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgb45\" (UniqueName: \"kubernetes.io/projected/84413885-86dc-4801-bb4f-e6a0e88875d0-kube-api-access-rgb45\") pod \"84413885-86dc-4801-bb4f-e6a0e88875d0\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.720236 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-dns-svc\") pod \"84413885-86dc-4801-bb4f-e6a0e88875d0\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.720356 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-config\") pod \"84413885-86dc-4801-bb4f-e6a0e88875d0\" (UID: \"84413885-86dc-4801-bb4f-e6a0e88875d0\") " Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.743825 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84413885-86dc-4801-bb4f-e6a0e88875d0-kube-api-access-rgb45" (OuterVolumeSpecName: "kube-api-access-rgb45") pod "84413885-86dc-4801-bb4f-e6a0e88875d0" (UID: "84413885-86dc-4801-bb4f-e6a0e88875d0"). InnerVolumeSpecName "kube-api-access-rgb45". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.778283 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "84413885-86dc-4801-bb4f-e6a0e88875d0" (UID: "84413885-86dc-4801-bb4f-e6a0e88875d0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.780805 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-config" (OuterVolumeSpecName: "config") pod "84413885-86dc-4801-bb4f-e6a0e88875d0" (UID: "84413885-86dc-4801-bb4f-e6a0e88875d0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.822551 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-scripts\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.822630 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6rhb\" (UniqueName: \"kubernetes.io/projected/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-kube-api-access-s6rhb\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.822659 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-config\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.822734 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.822822 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.823002 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.823045 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84413885-86dc-4801-bb4f-e6a0e88875d0-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.823056 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgb45\" (UniqueName: \"kubernetes.io/projected/84413885-86dc-4801-bb4f-e6a0e88875d0-kube-api-access-rgb45\") on node \"crc\" DevicePath \"\"" Jan 29 
12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.924966 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.925032 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.925149 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-scripts\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.925212 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6rhb\" (UniqueName: \"kubernetes.io/projected/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-kube-api-access-s6rhb\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.925260 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-config\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.926875 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-config\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.927183 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.927495 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-scripts\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.932922 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:19 crc kubenswrapper[4852]: I0129 12:09:19.943091 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6rhb\" (UniqueName: \"kubernetes.io/projected/51e5b1b6-ed8d-4369-acea-ddc62c5a8945-kube-api-access-s6rhb\") pod \"ovn-northd-0\" (UID: \"51e5b1b6-ed8d-4369-acea-ddc62c5a8945\") " pod="openstack/ovn-northd-0" Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.010856 4852 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.465638 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 12:09:20 crc kubenswrapper[4852]: W0129 12:09:20.475834 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51e5b1b6_ed8d_4369_acea_ddc62c5a8945.slice/crio-286c0232160623bc15b362ab636a3cec22b1935fae908a4dc836a4cb71d2fa70 WatchSource:0}: Error finding container 286c0232160623bc15b362ab636a3cec22b1935fae908a4dc836a4cb71d2fa70: Status 404 returned error can't find the container with id 286c0232160623bc15b362ab636a3cec22b1935fae908a4dc836a4cb71d2fa70 Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.546406 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" event={"ID":"84413885-86dc-4801-bb4f-e6a0e88875d0","Type":"ContainerDied","Data":"a5c7cbb65c952d993beda88c85824cc5e3f9d8aa7e6a9593a013ab9e0bbb305d"} Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.546489 4852 scope.go:117] "RemoveContainer" containerID="051f0ebce9efe5fa0095e62c13ba255e9c9c6e1ef4c3885aedbe6eb22bbf8b57" Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.546441 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-q7qjb" Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.548497 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"51e5b1b6-ed8d-4369-acea-ddc62c5a8945","Type":"ContainerStarted","Data":"286c0232160623bc15b362ab636a3cec22b1935fae908a4dc836a4cb71d2fa70"} Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.589603 4852 scope.go:117] "RemoveContainer" containerID="490a189ae9f93b5acb18adc67debf9fa1d732f5bb7a11678b70b4125bb5cb02b" Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.615551 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-q7qjb"] Jan 29 12:09:20 crc kubenswrapper[4852]: I0129 12:09:20.622110 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-q7qjb"] Jan 29 12:09:21 crc kubenswrapper[4852]: I0129 12:09:21.475683 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84413885-86dc-4801-bb4f-e6a0e88875d0" path="/var/lib/kubelet/pods/84413885-86dc-4801-bb4f-e6a0e88875d0/volumes" Jan 29 12:09:21 crc kubenswrapper[4852]: I0129 12:09:21.562118 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"51e5b1b6-ed8d-4369-acea-ddc62c5a8945","Type":"ContainerStarted","Data":"72912ad0abe010123261d530aad435f62a761a884950c11de22dab3e6bb5a4d5"} Jan 29 12:09:21 crc kubenswrapper[4852]: I0129 12:09:21.562169 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"51e5b1b6-ed8d-4369-acea-ddc62c5a8945","Type":"ContainerStarted","Data":"82341a8925a621bbbdfbc7fcf0e982a81f95d70c9df63d5a85cabb661e7aba42"} Jan 29 12:09:21 crc kubenswrapper[4852]: I0129 12:09:21.562294 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 29 12:09:21 crc kubenswrapper[4852]: I0129 12:09:21.583282 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.58325091 podStartE2EDuration="2.58325091s" podCreationTimestamp="2026-01-29 
12:09:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:21.578682587 +0000 UTC m=+5258.796013721" watchObservedRunningTime="2026-01-29 12:09:21.58325091 +0000 UTC m=+5258.800582084" Jan 29 12:09:23 crc kubenswrapper[4852]: I0129 12:09:23.471679 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:09:23 crc kubenswrapper[4852]: E0129 12:09:23.472321 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.041591 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-blmr9"] Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.043266 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.049294 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-79b0-account-create-update-tphb5"] Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.051376 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.054692 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.057631 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-blmr9"] Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.065999 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-79b0-account-create-update-tphb5"] Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.117229 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/829718a4-2107-42a2-b18a-41e1fbf9df79-operator-scripts\") pod \"keystone-79b0-account-create-update-tphb5\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.117297 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-operator-scripts\") pod \"keystone-db-create-blmr9\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.117647 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg7j4\" (UniqueName: \"kubernetes.io/projected/829718a4-2107-42a2-b18a-41e1fbf9df79-kube-api-access-pg7j4\") pod \"keystone-79b0-account-create-update-tphb5\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.117722 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bwjd\" (UniqueName: \"kubernetes.io/projected/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-kube-api-access-5bwjd\") pod \"keystone-db-create-blmr9\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.219153 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/829718a4-2107-42a2-b18a-41e1fbf9df79-operator-scripts\") pod \"keystone-79b0-account-create-update-tphb5\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.219223 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-operator-scripts\") pod \"keystone-db-create-blmr9\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.219302 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg7j4\" (UniqueName: \"kubernetes.io/projected/829718a4-2107-42a2-b18a-41e1fbf9df79-kube-api-access-pg7j4\") pod \"keystone-79b0-account-create-update-tphb5\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.219323 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bwjd\" (UniqueName: \"kubernetes.io/projected/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-kube-api-access-5bwjd\") pod \"keystone-db-create-blmr9\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.220197 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-operator-scripts\") pod \"keystone-db-create-blmr9\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.220240 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/829718a4-2107-42a2-b18a-41e1fbf9df79-operator-scripts\") pod \"keystone-79b0-account-create-update-tphb5\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.238777 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bwjd\" (UniqueName: \"kubernetes.io/projected/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-kube-api-access-5bwjd\") pod \"keystone-db-create-blmr9\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.240103 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg7j4\" (UniqueName: \"kubernetes.io/projected/829718a4-2107-42a2-b18a-41e1fbf9df79-kube-api-access-pg7j4\") pod \"keystone-79b0-account-create-update-tphb5\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 
12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.409162 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.421965 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.893669 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-79b0-account-create-update-tphb5"] Jan 29 12:09:25 crc kubenswrapper[4852]: W0129 12:09:25.904411 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod829718a4_2107_42a2_b18a_41e1fbf9df79.slice/crio-8130583fed379c420858dabf620bcf78e78c9d04f3eef91767956b10eb6adcc0 WatchSource:0}: Error finding container 8130583fed379c420858dabf620bcf78e78c9d04f3eef91767956b10eb6adcc0: Status 404 returned error can't find the container with id 8130583fed379c420858dabf620bcf78e78c9d04f3eef91767956b10eb6adcc0 Jan 29 12:09:25 crc kubenswrapper[4852]: I0129 12:09:25.985419 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-blmr9"] Jan 29 12:09:26 crc kubenswrapper[4852]: I0129 12:09:26.623959 4852 generic.go:334] "Generic (PLEG): container finished" podID="829718a4-2107-42a2-b18a-41e1fbf9df79" containerID="107757abec3842f4412c144564863962f627ba2e36569bf21ac5bb17a88a4390" exitCode=0 Jan 29 12:09:26 crc kubenswrapper[4852]: I0129 12:09:26.624338 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-79b0-account-create-update-tphb5" event={"ID":"829718a4-2107-42a2-b18a-41e1fbf9df79","Type":"ContainerDied","Data":"107757abec3842f4412c144564863962f627ba2e36569bf21ac5bb17a88a4390"} Jan 29 12:09:26 crc kubenswrapper[4852]: I0129 12:09:26.624368 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-79b0-account-create-update-tphb5" event={"ID":"829718a4-2107-42a2-b18a-41e1fbf9df79","Type":"ContainerStarted","Data":"8130583fed379c420858dabf620bcf78e78c9d04f3eef91767956b10eb6adcc0"} Jan 29 12:09:26 crc kubenswrapper[4852]: I0129 12:09:26.626416 4852 generic.go:334] "Generic (PLEG): container finished" podID="4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" containerID="1a69e60a5621c2b326393b5e4a4496d2cc5c898287082dc411e371addf37438e" exitCode=0 Jan 29 12:09:26 crc kubenswrapper[4852]: I0129 12:09:26.626437 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-blmr9" event={"ID":"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f","Type":"ContainerDied","Data":"1a69e60a5621c2b326393b5e4a4496d2cc5c898287082dc411e371addf37438e"} Jan 29 12:09:26 crc kubenswrapper[4852]: I0129 12:09:26.626450 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-blmr9" event={"ID":"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f","Type":"ContainerStarted","Data":"9d8a3600c582c1e13cce1cc502a6dd06f4dc605f023424c4c54a9452a63946d1"} Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.077341 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.083034 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.167492 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pg7j4\" (UniqueName: \"kubernetes.io/projected/829718a4-2107-42a2-b18a-41e1fbf9df79-kube-api-access-pg7j4\") pod \"829718a4-2107-42a2-b18a-41e1fbf9df79\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.167555 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bwjd\" (UniqueName: \"kubernetes.io/projected/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-kube-api-access-5bwjd\") pod \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.167738 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-operator-scripts\") pod \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\" (UID: \"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f\") " Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.167787 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/829718a4-2107-42a2-b18a-41e1fbf9df79-operator-scripts\") pod \"829718a4-2107-42a2-b18a-41e1fbf9df79\" (UID: \"829718a4-2107-42a2-b18a-41e1fbf9df79\") " Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.168496 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/829718a4-2107-42a2-b18a-41e1fbf9df79-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "829718a4-2107-42a2-b18a-41e1fbf9df79" (UID: "829718a4-2107-42a2-b18a-41e1fbf9df79"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.168573 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" (UID: "4b7eb19d-cf79-43a6-8c6f-f86b6911b36f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.173016 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-kube-api-access-5bwjd" (OuterVolumeSpecName: "kube-api-access-5bwjd") pod "4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" (UID: "4b7eb19d-cf79-43a6-8c6f-f86b6911b36f"). InnerVolumeSpecName "kube-api-access-5bwjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.173852 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/829718a4-2107-42a2-b18a-41e1fbf9df79-kube-api-access-pg7j4" (OuterVolumeSpecName: "kube-api-access-pg7j4") pod "829718a4-2107-42a2-b18a-41e1fbf9df79" (UID: "829718a4-2107-42a2-b18a-41e1fbf9df79"). InnerVolumeSpecName "kube-api-access-pg7j4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.269724 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pg7j4\" (UniqueName: \"kubernetes.io/projected/829718a4-2107-42a2-b18a-41e1fbf9df79-kube-api-access-pg7j4\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.269772 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bwjd\" (UniqueName: \"kubernetes.io/projected/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-kube-api-access-5bwjd\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.269783 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.269791 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/829718a4-2107-42a2-b18a-41e1fbf9df79-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.651506 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-79b0-account-create-update-tphb5" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.651499 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-79b0-account-create-update-tphb5" event={"ID":"829718a4-2107-42a2-b18a-41e1fbf9df79","Type":"ContainerDied","Data":"8130583fed379c420858dabf620bcf78e78c9d04f3eef91767956b10eb6adcc0"} Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.651618 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8130583fed379c420858dabf620bcf78e78c9d04f3eef91767956b10eb6adcc0" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.653354 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-blmr9" event={"ID":"4b7eb19d-cf79-43a6-8c6f-f86b6911b36f","Type":"ContainerDied","Data":"9d8a3600c582c1e13cce1cc502a6dd06f4dc605f023424c4c54a9452a63946d1"} Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.653378 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d8a3600c582c1e13cce1cc502a6dd06f4dc605f023424c4c54a9452a63946d1" Jan 29 12:09:28 crc kubenswrapper[4852]: I0129 12:09:28.653489 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-blmr9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.105806 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.623653 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-8mbc9"] Jan 29 12:09:30 crc kubenswrapper[4852]: E0129 12:09:30.624262 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="829718a4-2107-42a2-b18a-41e1fbf9df79" containerName="mariadb-account-create-update" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.624277 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="829718a4-2107-42a2-b18a-41e1fbf9df79" containerName="mariadb-account-create-update" Jan 29 12:09:30 crc kubenswrapper[4852]: E0129 12:09:30.624305 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" containerName="mariadb-database-create" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.624311 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" containerName="mariadb-database-create" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.624472 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="829718a4-2107-42a2-b18a-41e1fbf9df79" containerName="mariadb-account-create-update" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.624492 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" containerName="mariadb-database-create" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.625038 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.626937 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.627081 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.627509 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.627913 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-z2fpk" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.639145 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-8mbc9"] Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.715285 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sthjm\" (UniqueName: \"kubernetes.io/projected/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-kube-api-access-sthjm\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.715380 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-config-data\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.715411 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-combined-ca-bundle\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.816847 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-config-data\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.816901 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-combined-ca-bundle\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.817074 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sthjm\" (UniqueName: \"kubernetes.io/projected/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-kube-api-access-sthjm\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.822265 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-combined-ca-bundle\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.822404 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-config-data\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.834512 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sthjm\" (UniqueName: \"kubernetes.io/projected/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-kube-api-access-sthjm\") pod \"keystone-db-sync-8mbc9\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:30 crc kubenswrapper[4852]: I0129 12:09:30.955702 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:31 crc kubenswrapper[4852]: I0129 12:09:31.486391 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-8mbc9"] Jan 29 12:09:31 crc kubenswrapper[4852]: I0129 12:09:31.678616 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-8mbc9" event={"ID":"c48a9f07-3553-4f8f-9b9d-fc9edfda284e","Type":"ContainerStarted","Data":"a2c94be05c04e22b7bfc962d45c276597af319f902856eaad47921a6f53dd915"} Jan 29 12:09:32 crc kubenswrapper[4852]: I0129 12:09:32.687739 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-8mbc9" event={"ID":"c48a9f07-3553-4f8f-9b9d-fc9edfda284e","Type":"ContainerStarted","Data":"08e496c5fedb80181c2d710ee121097d863ed8a258d51dacc0166e85143f5041"} Jan 29 12:09:32 crc kubenswrapper[4852]: I0129 12:09:32.719526 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-8mbc9" podStartSLOduration=2.7194923319999997 podStartE2EDuration="2.719492332s" podCreationTimestamp="2026-01-29 12:09:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:32.708196337 +0000 UTC m=+5269.925527461" watchObservedRunningTime="2026-01-29 12:09:32.719492332 +0000 UTC m=+5269.936823506" Jan 29 12:09:34 crc kubenswrapper[4852]: I0129 12:09:34.754201 4852 generic.go:334] "Generic (PLEG): container finished" podID="c48a9f07-3553-4f8f-9b9d-fc9edfda284e" containerID="08e496c5fedb80181c2d710ee121097d863ed8a258d51dacc0166e85143f5041" exitCode=0 Jan 29 12:09:34 crc kubenswrapper[4852]: I0129 12:09:34.754564 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-8mbc9" event={"ID":"c48a9f07-3553-4f8f-9b9d-fc9edfda284e","Type":"ContainerDied","Data":"08e496c5fedb80181c2d710ee121097d863ed8a258d51dacc0166e85143f5041"} Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.106795 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.216702 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-combined-ca-bundle\") pod \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.216837 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sthjm\" (UniqueName: \"kubernetes.io/projected/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-kube-api-access-sthjm\") pod \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.216878 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-config-data\") pod \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\" (UID: \"c48a9f07-3553-4f8f-9b9d-fc9edfda284e\") " Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.223848 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-kube-api-access-sthjm" (OuterVolumeSpecName: "kube-api-access-sthjm") pod "c48a9f07-3553-4f8f-9b9d-fc9edfda284e" (UID: "c48a9f07-3553-4f8f-9b9d-fc9edfda284e"). InnerVolumeSpecName "kube-api-access-sthjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.256243 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-config-data" (OuterVolumeSpecName: "config-data") pod "c48a9f07-3553-4f8f-9b9d-fc9edfda284e" (UID: "c48a9f07-3553-4f8f-9b9d-fc9edfda284e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.264037 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c48a9f07-3553-4f8f-9b9d-fc9edfda284e" (UID: "c48a9f07-3553-4f8f-9b9d-fc9edfda284e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.319338 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.319374 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sthjm\" (UniqueName: \"kubernetes.io/projected/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-kube-api-access-sthjm\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.319390 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c48a9f07-3553-4f8f-9b9d-fc9edfda284e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.792470 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-8mbc9" event={"ID":"c48a9f07-3553-4f8f-9b9d-fc9edfda284e","Type":"ContainerDied","Data":"a2c94be05c04e22b7bfc962d45c276597af319f902856eaad47921a6f53dd915"} Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.792518 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2c94be05c04e22b7bfc962d45c276597af319f902856eaad47921a6f53dd915" Jan 29 12:09:36 crc kubenswrapper[4852]: I0129 12:09:36.792570 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-8mbc9" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.020464 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67b648cdf7-qc6g4"] Jan 29 12:09:37 crc kubenswrapper[4852]: E0129 12:09:37.021014 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c48a9f07-3553-4f8f-9b9d-fc9edfda284e" containerName="keystone-db-sync" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.021034 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c48a9f07-3553-4f8f-9b9d-fc9edfda284e" containerName="keystone-db-sync" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.021197 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c48a9f07-3553-4f8f-9b9d-fc9edfda284e" containerName="keystone-db-sync" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.022673 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.054687 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67b648cdf7-qc6g4"] Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.082803 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-jnz8x"] Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.083839 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.087162 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-z2fpk" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.087370 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.087615 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.087737 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.088097 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.091005 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jnz8x"] Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145490 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-fernet-keys\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145626 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-nb\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145715 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-sb\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145742 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfprs\" (UniqueName: \"kubernetes.io/projected/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-kube-api-access-cfprs\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145778 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh6fg\" (UniqueName: \"kubernetes.io/projected/c4728eef-3af4-42ef-bfae-110f0bbc61a6-kube-api-access-xh6fg\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145807 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-dns-svc\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145826 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-credential-keys\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145862 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-combined-ca-bundle\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145925 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-config-data\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145952 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-config\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.145974 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-scripts\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247098 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh6fg\" (UniqueName: \"kubernetes.io/projected/c4728eef-3af4-42ef-bfae-110f0bbc61a6-kube-api-access-xh6fg\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247177 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-dns-svc\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247388 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-credential-keys\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247406 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-combined-ca-bundle\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247462 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-config-data\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247492 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-config\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247515 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-scripts\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247540 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-fernet-keys\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247608 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-nb\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247694 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-sb\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.248711 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-dns-svc\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.248782 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-nb\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.247725 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfprs\" (UniqueName: \"kubernetes.io/projected/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-kube-api-access-cfprs\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.249020 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-sb\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.249028 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-config\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.256448 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-fernet-keys\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.258001 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-scripts\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.259952 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-combined-ca-bundle\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.260105 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-config-data\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.267213 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-credential-keys\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.270394 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh6fg\" (UniqueName: \"kubernetes.io/projected/c4728eef-3af4-42ef-bfae-110f0bbc61a6-kube-api-access-xh6fg\") pod \"dnsmasq-dns-67b648cdf7-qc6g4\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.274994 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfprs\" (UniqueName: \"kubernetes.io/projected/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-kube-api-access-cfprs\") pod \"keystone-bootstrap-jnz8x\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.345320 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.405213 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:37 crc kubenswrapper[4852]: I0129 12:09:37.934382 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67b648cdf7-qc6g4"] Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.034861 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jnz8x"] Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.464507 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:09:38 crc kubenswrapper[4852]: E0129 12:09:38.465140 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.817268 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jnz8x" event={"ID":"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4","Type":"ContainerStarted","Data":"bf0e68ca105d64c3d1b76652e2bcbb9cad83f2f2efa5b2d0b1a4abf35094d406"} Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.817335 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jnz8x" event={"ID":"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4","Type":"ContainerStarted","Data":"b6ae3b09b8b77f806fe26bacfa2c708b8970a838468050503dc3a6c3ca11399f"} Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.819267 4852 generic.go:334] "Generic (PLEG): container finished" podID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerID="33b090e25cf31e848b47afb095ceea8ef307dc3de87a67ea6222b39387157639" exitCode=0 Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.819312 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" event={"ID":"c4728eef-3af4-42ef-bfae-110f0bbc61a6","Type":"ContainerDied","Data":"33b090e25cf31e848b47afb095ceea8ef307dc3de87a67ea6222b39387157639"} Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.819390 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" event={"ID":"c4728eef-3af4-42ef-bfae-110f0bbc61a6","Type":"ContainerStarted","Data":"c1a7dc38e74aa140c51b1a792dd29907997efad2b99f9ecb97d3600d3641548f"} Jan 29 12:09:38 crc kubenswrapper[4852]: I0129 12:09:38.878646 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-jnz8x" podStartSLOduration=1.878623677 podStartE2EDuration="1.878623677s" podCreationTimestamp="2026-01-29 12:09:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:38.851728591 +0000 UTC m=+5276.069059755" watchObservedRunningTime="2026-01-29 12:09:38.878623677 +0000 UTC m=+5276.095954831" Jan 29 12:09:39 crc kubenswrapper[4852]: I0129 12:09:39.834224 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" event={"ID":"c4728eef-3af4-42ef-bfae-110f0bbc61a6","Type":"ContainerStarted","Data":"0363d19a7740b0482ef69695a7e3d8a0cc62df4729b16d8e69aa603f1f3f57ad"} Jan 29 12:09:39 crc kubenswrapper[4852]: I0129 12:09:39.834823 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:42 crc kubenswrapper[4852]: I0129 12:09:42.867645 4852 generic.go:334] "Generic (PLEG): container finished" podID="8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" containerID="bf0e68ca105d64c3d1b76652e2bcbb9cad83f2f2efa5b2d0b1a4abf35094d406" exitCode=0 Jan 29 12:09:42 crc kubenswrapper[4852]: I0129 12:09:42.867722 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jnz8x" event={"ID":"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4","Type":"ContainerDied","Data":"bf0e68ca105d64c3d1b76652e2bcbb9cad83f2f2efa5b2d0b1a4abf35094d406"} Jan 29 12:09:42 crc kubenswrapper[4852]: I0129 12:09:42.892107 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" podStartSLOduration=6.8920916089999995 podStartE2EDuration="6.892091609s" podCreationTimestamp="2026-01-29 12:09:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:39.864084638 +0000 UTC m=+5277.081415792" watchObservedRunningTime="2026-01-29 12:09:42.892091609 +0000 UTC m=+5280.109422743" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.253484 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.393599 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-fernet-keys\") pod \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.393650 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-credential-keys\") pod \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.393699 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-scripts\") pod \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.394894 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-combined-ca-bundle\") pod \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.395170 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfprs\" (UniqueName: \"kubernetes.io/projected/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-kube-api-access-cfprs\") pod \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.395286 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-config-data\") pod \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\" (UID: \"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4\") " Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 
12:09:44.400344 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-scripts" (OuterVolumeSpecName: "scripts") pod "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" (UID: "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.400406 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" (UID: "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.400795 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" (UID: "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.407228 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-kube-api-access-cfprs" (OuterVolumeSpecName: "kube-api-access-cfprs") pod "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" (UID: "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4"). InnerVolumeSpecName "kube-api-access-cfprs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.433682 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-config-data" (OuterVolumeSpecName: "config-data") pod "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" (UID: "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.433797 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" (UID: "8b705ddf-3cea-4b2b-8e4b-839655b4e5a4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.498655 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfprs\" (UniqueName: \"kubernetes.io/projected/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-kube-api-access-cfprs\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.498708 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.498724 4852 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.498735 4852 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.498748 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.498759 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.903187 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jnz8x" event={"ID":"8b705ddf-3cea-4b2b-8e4b-839655b4e5a4","Type":"ContainerDied","Data":"b6ae3b09b8b77f806fe26bacfa2c708b8970a838468050503dc3a6c3ca11399f"} Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.903243 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6ae3b09b8b77f806fe26bacfa2c708b8970a838468050503dc3a6c3ca11399f" Jan 29 12:09:44 crc kubenswrapper[4852]: I0129 12:09:44.903257 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jnz8x" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.071926 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-jnz8x"] Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.078433 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-jnz8x"] Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.164298 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-xspwv"] Jan 29 12:09:45 crc kubenswrapper[4852]: E0129 12:09:45.164892 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" containerName="keystone-bootstrap" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.164919 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" containerName="keystone-bootstrap" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.165174 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" containerName="keystone-bootstrap" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.165978 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.172733 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-z2fpk" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.172888 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.173061 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.173273 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.173556 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.185437 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xspwv"] Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.318000 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-fernet-keys\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.318072 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrc4k\" (UniqueName: \"kubernetes.io/projected/ed338fa4-983d-416b-95b7-0c6b388b9025-kube-api-access-nrc4k\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.318098 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-credential-keys\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 
12:09:45.318193 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-config-data\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.318328 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-combined-ca-bundle\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.318381 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-scripts\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.420686 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-fernet-keys\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.420779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrc4k\" (UniqueName: \"kubernetes.io/projected/ed338fa4-983d-416b-95b7-0c6b388b9025-kube-api-access-nrc4k\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.420818 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-credential-keys\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.420854 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-config-data\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.420921 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-combined-ca-bundle\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.420967 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-scripts\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.427462 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-combined-ca-bundle\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.428103 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-config-data\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.429512 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-fernet-keys\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.431939 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-scripts\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.433022 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-credential-keys\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.442509 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrc4k\" (UniqueName: \"kubernetes.io/projected/ed338fa4-983d-416b-95b7-0c6b388b9025-kube-api-access-nrc4k\") pod \"keystone-bootstrap-xspwv\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.475738 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b705ddf-3cea-4b2b-8e4b-839655b4e5a4" path="/var/lib/kubelet/pods/8b705ddf-3cea-4b2b-8e4b-839655b4e5a4/volumes" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.522142 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:45 crc kubenswrapper[4852]: I0129 12:09:45.973433 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xspwv"] Jan 29 12:09:46 crc kubenswrapper[4852]: I0129 12:09:46.927370 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xspwv" event={"ID":"ed338fa4-983d-416b-95b7-0c6b388b9025","Type":"ContainerStarted","Data":"12a9892db3ae1e68670100f9f092ac08aaef25aaeb89752a18e351f5a33682c0"} Jan 29 12:09:46 crc kubenswrapper[4852]: I0129 12:09:46.927724 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xspwv" event={"ID":"ed338fa4-983d-416b-95b7-0c6b388b9025","Type":"ContainerStarted","Data":"4ed1a657928a19a36d5f698fa8fc12dc77031be15bf6bdc515f20cdd1e32a27b"} Jan 29 12:09:46 crc kubenswrapper[4852]: I0129 12:09:46.947306 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-xspwv" podStartSLOduration=1.947287127 podStartE2EDuration="1.947287127s" podCreationTimestamp="2026-01-29 12:09:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:46.945614246 +0000 UTC m=+5284.162945410" watchObservedRunningTime="2026-01-29 12:09:46.947287127 +0000 UTC m=+5284.164618261" Jan 29 12:09:47 crc kubenswrapper[4852]: I0129 12:09:47.346738 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:09:47 crc kubenswrapper[4852]: I0129 12:09:47.431404 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-644c8fb5bc-8rrgm"] Jan 29 12:09:47 crc kubenswrapper[4852]: I0129 12:09:47.431881 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerName="dnsmasq-dns" containerID="cri-o://6061cae9d3ce13cadf92a7764d9f6662d4b90ba9ba7e1d53d15e19461607905a" gracePeriod=10 Jan 29 12:09:47 crc kubenswrapper[4852]: I0129 12:09:47.938120 4852 generic.go:334] "Generic (PLEG): container finished" podID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerID="6061cae9d3ce13cadf92a7764d9f6662d4b90ba9ba7e1d53d15e19461607905a" exitCode=0 Jan 29 12:09:47 crc kubenswrapper[4852]: I0129 12:09:47.938205 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" event={"ID":"1033639a-9c7a-4de6-a288-1568c2a20f2e","Type":"ContainerDied","Data":"6061cae9d3ce13cadf92a7764d9f6662d4b90ba9ba7e1d53d15e19461607905a"} Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.079316 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.178292 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-sb\") pod \"1033639a-9c7a-4de6-a288-1568c2a20f2e\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.178463 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-dns-svc\") pod \"1033639a-9c7a-4de6-a288-1568c2a20f2e\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.178517 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-config\") pod \"1033639a-9c7a-4de6-a288-1568c2a20f2e\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.178539 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-nb\") pod \"1033639a-9c7a-4de6-a288-1568c2a20f2e\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.178624 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4k6j2\" (UniqueName: \"kubernetes.io/projected/1033639a-9c7a-4de6-a288-1568c2a20f2e-kube-api-access-4k6j2\") pod \"1033639a-9c7a-4de6-a288-1568c2a20f2e\" (UID: \"1033639a-9c7a-4de6-a288-1568c2a20f2e\") " Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.190105 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1033639a-9c7a-4de6-a288-1568c2a20f2e-kube-api-access-4k6j2" (OuterVolumeSpecName: "kube-api-access-4k6j2") pod "1033639a-9c7a-4de6-a288-1568c2a20f2e" (UID: "1033639a-9c7a-4de6-a288-1568c2a20f2e"). InnerVolumeSpecName "kube-api-access-4k6j2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.221565 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1033639a-9c7a-4de6-a288-1568c2a20f2e" (UID: "1033639a-9c7a-4de6-a288-1568c2a20f2e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.223047 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-config" (OuterVolumeSpecName: "config") pod "1033639a-9c7a-4de6-a288-1568c2a20f2e" (UID: "1033639a-9c7a-4de6-a288-1568c2a20f2e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.223071 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1033639a-9c7a-4de6-a288-1568c2a20f2e" (UID: "1033639a-9c7a-4de6-a288-1568c2a20f2e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.248865 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1033639a-9c7a-4de6-a288-1568c2a20f2e" (UID: "1033639a-9c7a-4de6-a288-1568c2a20f2e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.280783 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.280843 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.280854 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.280866 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4k6j2\" (UniqueName: \"kubernetes.io/projected/1033639a-9c7a-4de6-a288-1568c2a20f2e-kube-api-access-4k6j2\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.280895 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1033639a-9c7a-4de6-a288-1568c2a20f2e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.955221 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" event={"ID":"1033639a-9c7a-4de6-a288-1568c2a20f2e","Type":"ContainerDied","Data":"9f21da53ac7bb020687e733d7a01a2994c0db1a282b66088c5f279faf76bed5d"} Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.955734 4852 scope.go:117] "RemoveContainer" containerID="6061cae9d3ce13cadf92a7764d9f6662d4b90ba9ba7e1d53d15e19461607905a" Jan 29 12:09:48 crc kubenswrapper[4852]: I0129 12:09:48.955338 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-644c8fb5bc-8rrgm" Jan 29 12:09:49 crc kubenswrapper[4852]: I0129 12:09:49.018676 4852 scope.go:117] "RemoveContainer" containerID="237e3545aa9be491737feea60f676c2e45f559d8b396795f19e164fed175256b" Jan 29 12:09:49 crc kubenswrapper[4852]: I0129 12:09:49.020492 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-644c8fb5bc-8rrgm"] Jan 29 12:09:49 crc kubenswrapper[4852]: I0129 12:09:49.027237 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-644c8fb5bc-8rrgm"] Jan 29 12:09:49 crc kubenswrapper[4852]: I0129 12:09:49.477223 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" path="/var/lib/kubelet/pods/1033639a-9c7a-4de6-a288-1568c2a20f2e/volumes" Jan 29 12:09:49 crc kubenswrapper[4852]: I0129 12:09:49.976231 4852 generic.go:334] "Generic (PLEG): container finished" podID="ed338fa4-983d-416b-95b7-0c6b388b9025" containerID="12a9892db3ae1e68670100f9f092ac08aaef25aaeb89752a18e351f5a33682c0" exitCode=0 Jan 29 12:09:49 crc kubenswrapper[4852]: I0129 12:09:49.976296 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xspwv" event={"ID":"ed338fa4-983d-416b-95b7-0c6b388b9025","Type":"ContainerDied","Data":"12a9892db3ae1e68670100f9f092ac08aaef25aaeb89752a18e351f5a33682c0"} Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.373037 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.439766 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-config-data\") pod \"ed338fa4-983d-416b-95b7-0c6b388b9025\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.439821 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-scripts\") pod \"ed338fa4-983d-416b-95b7-0c6b388b9025\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.439839 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-credential-keys\") pod \"ed338fa4-983d-416b-95b7-0c6b388b9025\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.440653 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-combined-ca-bundle\") pod \"ed338fa4-983d-416b-95b7-0c6b388b9025\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.440726 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrc4k\" (UniqueName: \"kubernetes.io/projected/ed338fa4-983d-416b-95b7-0c6b388b9025-kube-api-access-nrc4k\") pod \"ed338fa4-983d-416b-95b7-0c6b388b9025\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.440765 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-fernet-keys\") pod \"ed338fa4-983d-416b-95b7-0c6b388b9025\" (UID: \"ed338fa4-983d-416b-95b7-0c6b388b9025\") " Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.449736 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ed338fa4-983d-416b-95b7-0c6b388b9025" (UID: "ed338fa4-983d-416b-95b7-0c6b388b9025"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.449753 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-scripts" (OuterVolumeSpecName: "scripts") pod "ed338fa4-983d-416b-95b7-0c6b388b9025" (UID: "ed338fa4-983d-416b-95b7-0c6b388b9025"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.449812 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ed338fa4-983d-416b-95b7-0c6b388b9025" (UID: "ed338fa4-983d-416b-95b7-0c6b388b9025"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.449848 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed338fa4-983d-416b-95b7-0c6b388b9025-kube-api-access-nrc4k" (OuterVolumeSpecName: "kube-api-access-nrc4k") pod "ed338fa4-983d-416b-95b7-0c6b388b9025" (UID: "ed338fa4-983d-416b-95b7-0c6b388b9025"). InnerVolumeSpecName "kube-api-access-nrc4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.465713 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-config-data" (OuterVolumeSpecName: "config-data") pod "ed338fa4-983d-416b-95b7-0c6b388b9025" (UID: "ed338fa4-983d-416b-95b7-0c6b388b9025"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.467139 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed338fa4-983d-416b-95b7-0c6b388b9025" (UID: "ed338fa4-983d-416b-95b7-0c6b388b9025"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.542132 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.542436 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrc4k\" (UniqueName: \"kubernetes.io/projected/ed338fa4-983d-416b-95b7-0c6b388b9025-kube-api-access-nrc4k\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.542447 4852 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.542455 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.542463 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.542472 4852 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed338fa4-983d-416b-95b7-0c6b388b9025-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.994774 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xspwv" event={"ID":"ed338fa4-983d-416b-95b7-0c6b388b9025","Type":"ContainerDied","Data":"4ed1a657928a19a36d5f698fa8fc12dc77031be15bf6bdc515f20cdd1e32a27b"} Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.994816 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed1a657928a19a36d5f698fa8fc12dc77031be15bf6bdc515f20cdd1e32a27b" Jan 29 12:09:51 crc kubenswrapper[4852]: I0129 12:09:51.994834 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-xspwv" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.086843 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7c85c65b76-hsllc"] Jan 29 12:09:52 crc kubenswrapper[4852]: E0129 12:09:52.087185 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerName="init" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.087201 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerName="init" Jan 29 12:09:52 crc kubenswrapper[4852]: E0129 12:09:52.087220 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerName="dnsmasq-dns" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.087226 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerName="dnsmasq-dns" Jan 29 12:09:52 crc kubenswrapper[4852]: E0129 12:09:52.087240 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed338fa4-983d-416b-95b7-0c6b388b9025" containerName="keystone-bootstrap" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.087247 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed338fa4-983d-416b-95b7-0c6b388b9025" containerName="keystone-bootstrap" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.087404 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1033639a-9c7a-4de6-a288-1568c2a20f2e" containerName="dnsmasq-dns" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.087430 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed338fa4-983d-416b-95b7-0c6b388b9025" containerName="keystone-bootstrap" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.087942 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.089869 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-z2fpk" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.090100 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.091788 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.092548 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.101344 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c85c65b76-hsllc"] Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.151701 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-scripts\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.151967 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-combined-ca-bundle\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.152438 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq92d\" (UniqueName: \"kubernetes.io/projected/2a761271-dd86-465e-86af-c5ba36c7cb64-kube-api-access-dq92d\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.152537 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-fernet-keys\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.152653 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-credential-keys\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.152877 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-config-data\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.255395 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-config-data\") pod 
\"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.255621 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-scripts\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.255681 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-combined-ca-bundle\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.255786 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq92d\" (UniqueName: \"kubernetes.io/projected/2a761271-dd86-465e-86af-c5ba36c7cb64-kube-api-access-dq92d\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.255834 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-fernet-keys\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.255915 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-credential-keys\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.259685 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-credential-keys\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.260138 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-fernet-keys\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.260251 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-combined-ca-bundle\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.269548 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-config-data\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc 
kubenswrapper[4852]: I0129 12:09:52.278212 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a761271-dd86-465e-86af-c5ba36c7cb64-scripts\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.280749 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq92d\" (UniqueName: \"kubernetes.io/projected/2a761271-dd86-465e-86af-c5ba36c7cb64-kube-api-access-dq92d\") pod \"keystone-7c85c65b76-hsllc\" (UID: \"2a761271-dd86-465e-86af-c5ba36c7cb64\") " pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.443631 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.463638 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:09:52 crc kubenswrapper[4852]: E0129 12:09:52.463926 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:09:52 crc kubenswrapper[4852]: I0129 12:09:52.871297 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c85c65b76-hsllc"] Jan 29 12:09:52 crc kubenswrapper[4852]: W0129 12:09:52.883836 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a761271_dd86_465e_86af_c5ba36c7cb64.slice/crio-4fad2b3b5faec6feca07b4360b55c47ac45d4ed48711c206152d0ec16c12b997 WatchSource:0}: Error finding container 4fad2b3b5faec6feca07b4360b55c47ac45d4ed48711c206152d0ec16c12b997: Status 404 returned error can't find the container with id 4fad2b3b5faec6feca07b4360b55c47ac45d4ed48711c206152d0ec16c12b997 Jan 29 12:09:53 crc kubenswrapper[4852]: I0129 12:09:53.002901 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c85c65b76-hsllc" event={"ID":"2a761271-dd86-465e-86af-c5ba36c7cb64","Type":"ContainerStarted","Data":"4fad2b3b5faec6feca07b4360b55c47ac45d4ed48711c206152d0ec16c12b997"} Jan 29 12:09:54 crc kubenswrapper[4852]: I0129 12:09:54.014839 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c85c65b76-hsllc" event={"ID":"2a761271-dd86-465e-86af-c5ba36c7cb64","Type":"ContainerStarted","Data":"d4857dde22376ae2ddb9806164fc018f1cc94b8eadb414cea3c77b4f245fd75f"} Jan 29 12:09:54 crc kubenswrapper[4852]: I0129 12:09:54.015728 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:09:54 crc kubenswrapper[4852]: I0129 12:09:54.048843 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7c85c65b76-hsllc" podStartSLOduration=2.048815062 podStartE2EDuration="2.048815062s" podCreationTimestamp="2026-01-29 12:09:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:54.044382004 +0000 UTC 
m=+5291.261713208" watchObservedRunningTime="2026-01-29 12:09:54.048815062 +0000 UTC m=+5291.266146216" Jan 29 12:10:05 crc kubenswrapper[4852]: I0129 12:10:05.464216 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:10:06 crc kubenswrapper[4852]: I0129 12:10:06.167948 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"39ac92376ac0767fe7ffa13bcbc31ca20190bc032b2006a4dbc47c525a07d124"} Jan 29 12:10:23 crc kubenswrapper[4852]: I0129 12:10:23.912439 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7c85c65b76-hsllc" Jan 29 12:10:24 crc kubenswrapper[4852]: I0129 12:10:24.983095 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:24 crc kubenswrapper[4852]: I0129 12:10:24.987821 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:10:24 crc kubenswrapper[4852]: I0129 12:10:24.994127 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 29 12:10:24 crc kubenswrapper[4852]: I0129 12:10:24.995732 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 29 12:10:24 crc kubenswrapper[4852]: I0129 12:10:24.996410 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-ldxzb" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.018805 4852 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f0d322d-5573-4df3-97fe-e898403c5dff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:10:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:10:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:10:24Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:10:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8wgj2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:10:24Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.025259 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.049089 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config-secret\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.049227 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wgj2\" (UniqueName: \"kubernetes.io/projected/8f0d322d-5573-4df3-97fe-e898403c5dff-kube-api-access-8wgj2\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.049269 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.070425 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:25 crc kubenswrapper[4852]: E0129 12:10:25.071305 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-8wgj2 openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="8f0d322d-5573-4df3-97fe-e898403c5dff" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.079796 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.088720 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.090611 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.099362 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.107035 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8f0d322d-5573-4df3-97fe-e898403c5dff" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.150917 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.151090 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config-secret\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.151159 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thct5\" (UniqueName: \"kubernetes.io/projected/8dc6c019-745a-4ee8-97a2-efd0347d376e-kube-api-access-thct5\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.151210 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wgj2\" (UniqueName: \"kubernetes.io/projected/8f0d322d-5573-4df3-97fe-e898403c5dff-kube-api-access-8wgj2\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.151233 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config-secret\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.151259 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.152600 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: E0129 12:10:25.153878 4852 projected.go:194] Error preparing data for projected volume kube-api-access-8wgj2 for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (8f0d322d-5573-4df3-97fe-e898403c5dff) does not match the UID in record. 
The object might have been deleted and then recreated Jan 29 12:10:25 crc kubenswrapper[4852]: E0129 12:10:25.160905 4852 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8f0d322d-5573-4df3-97fe-e898403c5dff-kube-api-access-8wgj2 podName:8f0d322d-5573-4df3-97fe-e898403c5dff nodeName:}" failed. No retries permitted until 2026-01-29 12:10:25.660867928 +0000 UTC m=+5322.878199062 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-8wgj2" (UniqueName: "kubernetes.io/projected/8f0d322d-5573-4df3-97fe-e898403c5dff-kube-api-access-8wgj2") pod "openstackclient" (UID: "8f0d322d-5573-4df3-97fe-e898403c5dff") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (8f0d322d-5573-4df3-97fe-e898403c5dff) does not match the UID in record. The object might have been deleted and then recreated Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.168228 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config-secret\") pod \"openstackclient\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.252161 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.252271 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thct5\" (UniqueName: \"kubernetes.io/projected/8dc6c019-745a-4ee8-97a2-efd0347d376e-kube-api-access-thct5\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.252325 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config-secret\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.253202 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.255711 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config-secret\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.282315 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thct5\" (UniqueName: \"kubernetes.io/projected/8dc6c019-745a-4ee8-97a2-efd0347d376e-kube-api-access-thct5\") pod \"openstackclient\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " pod="openstack/openstackclient" Jan 29 12:10:25 crc 
kubenswrapper[4852]: I0129 12:10:25.344663 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.349101 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8f0d322d-5573-4df3-97fe-e898403c5dff" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.357004 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.360262 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8f0d322d-5573-4df3-97fe-e898403c5dff" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.409739 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.556402 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config\") pod \"8f0d322d-5573-4df3-97fe-e898403c5dff\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.556892 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config-secret\") pod \"8f0d322d-5573-4df3-97fe-e898403c5dff\" (UID: \"8f0d322d-5573-4df3-97fe-e898403c5dff\") " Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.557477 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wgj2\" (UniqueName: \"kubernetes.io/projected/8f0d322d-5573-4df3-97fe-e898403c5dff-kube-api-access-8wgj2\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.557732 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "8f0d322d-5573-4df3-97fe-e898403c5dff" (UID: "8f0d322d-5573-4df3-97fe-e898403c5dff"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.565624 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "8f0d322d-5573-4df3-97fe-e898403c5dff" (UID: "8f0d322d-5573-4df3-97fe-e898403c5dff"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.659922 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.659959 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8f0d322d-5573-4df3-97fe-e898403c5dff-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:25 crc kubenswrapper[4852]: I0129 12:10:25.876405 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 12:10:26 crc kubenswrapper[4852]: I0129 12:10:26.354437 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:10:26 crc kubenswrapper[4852]: I0129 12:10:26.354458 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"8dc6c019-745a-4ee8-97a2-efd0347d376e","Type":"ContainerStarted","Data":"98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc"} Jan 29 12:10:26 crc kubenswrapper[4852]: I0129 12:10:26.354786 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"8dc6c019-745a-4ee8-97a2-efd0347d376e","Type":"ContainerStarted","Data":"4936652ae5de2437a49e5865ce0c3e202990cdf6e5521d1e6d4bb8fbe587a299"} Jan 29 12:10:26 crc kubenswrapper[4852]: I0129 12:10:26.377634 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.377547328 podStartE2EDuration="1.377547328s" podCreationTimestamp="2026-01-29 12:10:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:10:26.375100208 +0000 UTC m=+5323.592431352" watchObservedRunningTime="2026-01-29 12:10:26.377547328 +0000 UTC m=+5323.594878512" Jan 29 12:10:26 crc kubenswrapper[4852]: I0129 12:10:26.378754 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8f0d322d-5573-4df3-97fe-e898403c5dff" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" Jan 29 12:10:27 crc kubenswrapper[4852]: I0129 12:10:27.476195 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f0d322d-5573-4df3-97fe-e898403c5dff" path="/var/lib/kubelet/pods/8f0d322d-5573-4df3-97fe-e898403c5dff/volumes" Jan 29 12:10:46 crc kubenswrapper[4852]: I0129 12:10:46.072604 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zh6zb"] Jan 29 12:10:46 crc kubenswrapper[4852]: I0129 12:10:46.083254 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zh6zb"] Jan 29 12:10:47 crc kubenswrapper[4852]: I0129 12:10:47.480055 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="779c21f7-b3c4-40ec-86df-ded55bf0c715" path="/var/lib/kubelet/pods/779c21f7-b3c4-40ec-86df-ded55bf0c715/volumes" Jan 29 12:10:49 crc kubenswrapper[4852]: I0129 12:10:49.443150 4852 scope.go:117] "RemoveContainer" containerID="d704e345f13663d19322c3c4143367395f13a59b72b2f8742889a76a50a329b1" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.763732 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rnh68"] 
Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.766824 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.777630 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnh68"] Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.787549 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpsdc\" (UniqueName: \"kubernetes.io/projected/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-kube-api-access-tpsdc\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.787806 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-catalog-content\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.787865 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-utilities\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.889607 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-utilities\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.890017 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpsdc\" (UniqueName: \"kubernetes.io/projected/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-kube-api-access-tpsdc\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.890102 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-catalog-content\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.890409 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-utilities\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.890461 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-catalog-content\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" 
Jan 29 12:10:56 crc kubenswrapper[4852]: I0129 12:10:56.909077 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpsdc\" (UniqueName: \"kubernetes.io/projected/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-kube-api-access-tpsdc\") pod \"redhat-marketplace-rnh68\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:57 crc kubenswrapper[4852]: I0129 12:10:57.126783 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:10:57 crc kubenswrapper[4852]: I0129 12:10:57.595735 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnh68"] Jan 29 12:10:57 crc kubenswrapper[4852]: W0129 12:10:57.605968 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31ebede0_d7ae_49c5_87bb_b7aa8d83fc64.slice/crio-239c5911978231831fbe6674fe18ed438e4c6c1cb8f0694969b6a3aead814991 WatchSource:0}: Error finding container 239c5911978231831fbe6674fe18ed438e4c6c1cb8f0694969b6a3aead814991: Status 404 returned error can't find the container with id 239c5911978231831fbe6674fe18ed438e4c6c1cb8f0694969b6a3aead814991 Jan 29 12:10:57 crc kubenswrapper[4852]: I0129 12:10:57.663863 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnh68" event={"ID":"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64","Type":"ContainerStarted","Data":"239c5911978231831fbe6674fe18ed438e4c6c1cb8f0694969b6a3aead814991"} Jan 29 12:10:58 crc kubenswrapper[4852]: I0129 12:10:58.675865 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnh68" event={"ID":"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64","Type":"ContainerDied","Data":"d10411d146f9645c0ef918a450fbece1aece7231162030967ff36a5012f0d829"} Jan 29 12:10:58 crc kubenswrapper[4852]: I0129 12:10:58.675737 4852 generic.go:334] "Generic (PLEG): container finished" podID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerID="d10411d146f9645c0ef918a450fbece1aece7231162030967ff36a5012f0d829" exitCode=0 Jan 29 12:10:58 crc kubenswrapper[4852]: I0129 12:10:58.680267 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:11:00 crc kubenswrapper[4852]: I0129 12:11:00.697729 4852 generic.go:334] "Generic (PLEG): container finished" podID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerID="348a6b08b2412bd623716e47128179c7f8c9fa50a48e06d8fe7c7594923b4166" exitCode=0 Jan 29 12:11:00 crc kubenswrapper[4852]: I0129 12:11:00.697808 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnh68" event={"ID":"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64","Type":"ContainerDied","Data":"348a6b08b2412bd623716e47128179c7f8c9fa50a48e06d8fe7c7594923b4166"} Jan 29 12:11:01 crc kubenswrapper[4852]: I0129 12:11:01.710556 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnh68" event={"ID":"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64","Type":"ContainerStarted","Data":"38cf8d286d7a1fcaae6df1535915772f7b74366823c18151ba7497efb2adbed0"} Jan 29 12:11:01 crc kubenswrapper[4852]: I0129 12:11:01.735125 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rnh68" podStartSLOduration=3.289752373 podStartE2EDuration="5.735104155s" podCreationTimestamp="2026-01-29 
12:10:56 +0000 UTC" firstStartedPulling="2026-01-29 12:10:58.679829559 +0000 UTC m=+5355.897160733" lastFinishedPulling="2026-01-29 12:11:01.125181361 +0000 UTC m=+5358.342512515" observedRunningTime="2026-01-29 12:11:01.731458835 +0000 UTC m=+5358.948789979" watchObservedRunningTime="2026-01-29 12:11:01.735104155 +0000 UTC m=+5358.952435299" Jan 29 12:11:07 crc kubenswrapper[4852]: I0129 12:11:07.127055 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:11:07 crc kubenswrapper[4852]: I0129 12:11:07.127762 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:11:07 crc kubenswrapper[4852]: I0129 12:11:07.196695 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:11:07 crc kubenswrapper[4852]: I0129 12:11:07.824700 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:11:07 crc kubenswrapper[4852]: I0129 12:11:07.887336 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnh68"] Jan 29 12:11:09 crc kubenswrapper[4852]: I0129 12:11:09.784229 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rnh68" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="registry-server" containerID="cri-o://38cf8d286d7a1fcaae6df1535915772f7b74366823c18151ba7497efb2adbed0" gracePeriod=2 Jan 29 12:11:10 crc kubenswrapper[4852]: I0129 12:11:10.804252 4852 generic.go:334] "Generic (PLEG): container finished" podID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerID="38cf8d286d7a1fcaae6df1535915772f7b74366823c18151ba7497efb2adbed0" exitCode=0 Jan 29 12:11:10 crc kubenswrapper[4852]: I0129 12:11:10.804332 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnh68" event={"ID":"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64","Type":"ContainerDied","Data":"38cf8d286d7a1fcaae6df1535915772f7b74366823c18151ba7497efb2adbed0"} Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.360087 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.473145 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-catalog-content\") pod \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.473270 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-utilities\") pod \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.473411 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpsdc\" (UniqueName: \"kubernetes.io/projected/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-kube-api-access-tpsdc\") pod \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\" (UID: \"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64\") " Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.474280 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-utilities" (OuterVolumeSpecName: "utilities") pod "31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" (UID: "31ebede0-d7ae-49c5-87bb-b7aa8d83fc64"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.483806 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-kube-api-access-tpsdc" (OuterVolumeSpecName: "kube-api-access-tpsdc") pod "31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" (UID: "31ebede0-d7ae-49c5-87bb-b7aa8d83fc64"). InnerVolumeSpecName "kube-api-access-tpsdc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.533334 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" (UID: "31ebede0-d7ae-49c5-87bb-b7aa8d83fc64"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.575014 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.575044 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.575073 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpsdc\" (UniqueName: \"kubernetes.io/projected/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64-kube-api-access-tpsdc\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.825201 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnh68" event={"ID":"31ebede0-d7ae-49c5-87bb-b7aa8d83fc64","Type":"ContainerDied","Data":"239c5911978231831fbe6674fe18ed438e4c6c1cb8f0694969b6a3aead814991"} Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.825491 4852 scope.go:117] "RemoveContainer" containerID="38cf8d286d7a1fcaae6df1535915772f7b74366823c18151ba7497efb2adbed0" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.825379 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnh68" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.884144 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnh68"] Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.885717 4852 scope.go:117] "RemoveContainer" containerID="348a6b08b2412bd623716e47128179c7f8c9fa50a48e06d8fe7c7594923b4166" Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.893388 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnh68"] Jan 29 12:11:11 crc kubenswrapper[4852]: I0129 12:11:11.917705 4852 scope.go:117] "RemoveContainer" containerID="d10411d146f9645c0ef918a450fbece1aece7231162030967ff36a5012f0d829" Jan 29 12:11:13 crc kubenswrapper[4852]: I0129 12:11:13.472433 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" path="/var/lib/kubelet/pods/31ebede0-d7ae-49c5-87bb-b7aa8d83fc64/volumes" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.397820 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-2184-account-create-update-cz5tw"] Jan 29 12:12:11 crc kubenswrapper[4852]: E0129 12:12:11.398565 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="extract-utilities" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.398590 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="extract-utilities" Jan 29 12:12:11 crc kubenswrapper[4852]: E0129 12:12:11.398608 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="extract-content" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.398614 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="extract-content" Jan 29 12:12:11 crc kubenswrapper[4852]: E0129 12:12:11.398629 4852 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="registry-server" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.398635 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="registry-server" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.398817 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="31ebede0-d7ae-49c5-87bb-b7aa8d83fc64" containerName="registry-server" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.399391 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.400678 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-xft2d"] Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.401444 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.402007 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.413798 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2184-account-create-update-cz5tw"] Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.426841 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-xft2d"] Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.571754 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e2b822-3c56-4b25-ab41-83c6c5de861f-operator-scripts\") pod \"barbican-db-create-xft2d\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.571829 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br4vv\" (UniqueName: \"kubernetes.io/projected/55e2b822-3c56-4b25-ab41-83c6c5de861f-kube-api-access-br4vv\") pod \"barbican-db-create-xft2d\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.571976 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tq2c\" (UniqueName: \"kubernetes.io/projected/9b4707bf-89be-43fd-a926-b3a96b4b2e74-kube-api-access-9tq2c\") pod \"barbican-2184-account-create-update-cz5tw\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.572141 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4707bf-89be-43fd-a926-b3a96b4b2e74-operator-scripts\") pod \"barbican-2184-account-create-update-cz5tw\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.673720 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e2b822-3c56-4b25-ab41-83c6c5de861f-operator-scripts\") pod 
\"barbican-db-create-xft2d\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.673848 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br4vv\" (UniqueName: \"kubernetes.io/projected/55e2b822-3c56-4b25-ab41-83c6c5de861f-kube-api-access-br4vv\") pod \"barbican-db-create-xft2d\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.673895 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tq2c\" (UniqueName: \"kubernetes.io/projected/9b4707bf-89be-43fd-a926-b3a96b4b2e74-kube-api-access-9tq2c\") pod \"barbican-2184-account-create-update-cz5tw\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.673999 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4707bf-89be-43fd-a926-b3a96b4b2e74-operator-scripts\") pod \"barbican-2184-account-create-update-cz5tw\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.674854 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e2b822-3c56-4b25-ab41-83c6c5de861f-operator-scripts\") pod \"barbican-db-create-xft2d\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.675002 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4707bf-89be-43fd-a926-b3a96b4b2e74-operator-scripts\") pod \"barbican-2184-account-create-update-cz5tw\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.697010 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tq2c\" (UniqueName: \"kubernetes.io/projected/9b4707bf-89be-43fd-a926-b3a96b4b2e74-kube-api-access-9tq2c\") pod \"barbican-2184-account-create-update-cz5tw\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.698027 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br4vv\" (UniqueName: \"kubernetes.io/projected/55e2b822-3c56-4b25-ab41-83c6c5de861f-kube-api-access-br4vv\") pod \"barbican-db-create-xft2d\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.828001 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:11 crc kubenswrapper[4852]: I0129 12:12:11.837052 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:12 crc kubenswrapper[4852]: I0129 12:12:12.376287 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2184-account-create-update-cz5tw"] Jan 29 12:12:12 crc kubenswrapper[4852]: I0129 12:12:12.424838 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-xft2d"] Jan 29 12:12:12 crc kubenswrapper[4852]: I0129 12:12:12.497054 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-xft2d" event={"ID":"55e2b822-3c56-4b25-ab41-83c6c5de861f","Type":"ContainerStarted","Data":"c9f9a3652beb40c643a25767d7edbf1eb4f90c8dda1388400877e01badc550b2"} Jan 29 12:12:12 crc kubenswrapper[4852]: I0129 12:12:12.498663 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2184-account-create-update-cz5tw" event={"ID":"9b4707bf-89be-43fd-a926-b3a96b4b2e74","Type":"ContainerStarted","Data":"15c9015ffc937a06ca35a147de46a1598fd24ba685b710847830b1c28f3fe4b0"} Jan 29 12:12:13 crc kubenswrapper[4852]: I0129 12:12:13.525924 4852 generic.go:334] "Generic (PLEG): container finished" podID="55e2b822-3c56-4b25-ab41-83c6c5de861f" containerID="baf63117abd86389987f03944932929afb47d30abb61d68c6412aa66584f4e24" exitCode=0 Jan 29 12:12:13 crc kubenswrapper[4852]: I0129 12:12:13.526088 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-xft2d" event={"ID":"55e2b822-3c56-4b25-ab41-83c6c5de861f","Type":"ContainerDied","Data":"baf63117abd86389987f03944932929afb47d30abb61d68c6412aa66584f4e24"} Jan 29 12:12:13 crc kubenswrapper[4852]: I0129 12:12:13.529142 4852 generic.go:334] "Generic (PLEG): container finished" podID="9b4707bf-89be-43fd-a926-b3a96b4b2e74" containerID="321c1d8766955206df377411efc213c93d8bb42df8b541832488fdd98016f8c4" exitCode=0 Jan 29 12:12:13 crc kubenswrapper[4852]: I0129 12:12:13.529186 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2184-account-create-update-cz5tw" event={"ID":"9b4707bf-89be-43fd-a926-b3a96b4b2e74","Type":"ContainerDied","Data":"321c1d8766955206df377411efc213c93d8bb42df8b541832488fdd98016f8c4"} Jan 29 12:12:14 crc kubenswrapper[4852]: I0129 12:12:14.999312 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.005078 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.140375 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br4vv\" (UniqueName: \"kubernetes.io/projected/55e2b822-3c56-4b25-ab41-83c6c5de861f-kube-api-access-br4vv\") pod \"55e2b822-3c56-4b25-ab41-83c6c5de861f\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.140570 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e2b822-3c56-4b25-ab41-83c6c5de861f-operator-scripts\") pod \"55e2b822-3c56-4b25-ab41-83c6c5de861f\" (UID: \"55e2b822-3c56-4b25-ab41-83c6c5de861f\") " Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.140736 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4707bf-89be-43fd-a926-b3a96b4b2e74-operator-scripts\") pod \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.140922 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tq2c\" (UniqueName: \"kubernetes.io/projected/9b4707bf-89be-43fd-a926-b3a96b4b2e74-kube-api-access-9tq2c\") pod \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\" (UID: \"9b4707bf-89be-43fd-a926-b3a96b4b2e74\") " Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.141948 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55e2b822-3c56-4b25-ab41-83c6c5de861f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55e2b822-3c56-4b25-ab41-83c6c5de861f" (UID: "55e2b822-3c56-4b25-ab41-83c6c5de861f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.142061 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b4707bf-89be-43fd-a926-b3a96b4b2e74-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b4707bf-89be-43fd-a926-b3a96b4b2e74" (UID: "9b4707bf-89be-43fd-a926-b3a96b4b2e74"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.146994 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b4707bf-89be-43fd-a926-b3a96b4b2e74-kube-api-access-9tq2c" (OuterVolumeSpecName: "kube-api-access-9tq2c") pod "9b4707bf-89be-43fd-a926-b3a96b4b2e74" (UID: "9b4707bf-89be-43fd-a926-b3a96b4b2e74"). InnerVolumeSpecName "kube-api-access-9tq2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.150357 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55e2b822-3c56-4b25-ab41-83c6c5de861f-kube-api-access-br4vv" (OuterVolumeSpecName: "kube-api-access-br4vv") pod "55e2b822-3c56-4b25-ab41-83c6c5de861f" (UID: "55e2b822-3c56-4b25-ab41-83c6c5de861f"). InnerVolumeSpecName "kube-api-access-br4vv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.243810 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4707bf-89be-43fd-a926-b3a96b4b2e74-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.243857 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tq2c\" (UniqueName: \"kubernetes.io/projected/9b4707bf-89be-43fd-a926-b3a96b4b2e74-kube-api-access-9tq2c\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.243873 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br4vv\" (UniqueName: \"kubernetes.io/projected/55e2b822-3c56-4b25-ab41-83c6c5de861f-kube-api-access-br4vv\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.243886 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e2b822-3c56-4b25-ab41-83c6c5de861f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.566673 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-xft2d" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.566692 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-xft2d" event={"ID":"55e2b822-3c56-4b25-ab41-83c6c5de861f","Type":"ContainerDied","Data":"c9f9a3652beb40c643a25767d7edbf1eb4f90c8dda1388400877e01badc550b2"} Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.566744 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9f9a3652beb40c643a25767d7edbf1eb4f90c8dda1388400877e01badc550b2" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.569125 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2184-account-create-update-cz5tw" event={"ID":"9b4707bf-89be-43fd-a926-b3a96b4b2e74","Type":"ContainerDied","Data":"15c9015ffc937a06ca35a147de46a1598fd24ba685b710847830b1c28f3fe4b0"} Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.569150 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2184-account-create-update-cz5tw" Jan 29 12:12:15 crc kubenswrapper[4852]: I0129 12:12:15.569167 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15c9015ffc937a06ca35a147de46a1598fd24ba685b710847830b1c28f3fe4b0" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.764426 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-5bqts"] Jan 29 12:12:16 crc kubenswrapper[4852]: E0129 12:12:16.764799 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b4707bf-89be-43fd-a926-b3a96b4b2e74" containerName="mariadb-account-create-update" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.764812 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b4707bf-89be-43fd-a926-b3a96b4b2e74" containerName="mariadb-account-create-update" Jan 29 12:12:16 crc kubenswrapper[4852]: E0129 12:12:16.764825 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e2b822-3c56-4b25-ab41-83c6c5de861f" containerName="mariadb-database-create" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.764831 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="55e2b822-3c56-4b25-ab41-83c6c5de861f" containerName="mariadb-database-create" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.764983 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="55e2b822-3c56-4b25-ab41-83c6c5de861f" containerName="mariadb-database-create" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.765249 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b4707bf-89be-43fd-a926-b3a96b4b2e74" containerName="mariadb-account-create-update" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.765790 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.768422 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-l6ph5" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.768624 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.785390 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-5bqts"] Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.790504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-combined-ca-bundle\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.790641 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4lzs\" (UniqueName: \"kubernetes.io/projected/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-kube-api-access-s4lzs\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.790695 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-db-sync-config-data\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.893917 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-combined-ca-bundle\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.893981 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4lzs\" (UniqueName: \"kubernetes.io/projected/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-kube-api-access-s4lzs\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.894012 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-db-sync-config-data\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.897666 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-db-sync-config-data\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.899085 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-combined-ca-bundle\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:16 crc kubenswrapper[4852]: I0129 12:12:16.908871 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4lzs\" (UniqueName: \"kubernetes.io/projected/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-kube-api-access-s4lzs\") pod \"barbican-db-sync-5bqts\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:17 crc kubenswrapper[4852]: I0129 12:12:17.085180 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:17 crc kubenswrapper[4852]: I0129 12:12:17.623334 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-5bqts"] Jan 29 12:12:18 crc kubenswrapper[4852]: I0129 12:12:18.612119 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-5bqts" event={"ID":"cc7a2f32-b05e-40f8-ac12-2af1f05a5578","Type":"ContainerStarted","Data":"94920d60be493663535dabd75b56284e4f46e8aee5e1fe34ebbca489e86c5542"} Jan 29 12:12:18 crc kubenswrapper[4852]: I0129 12:12:18.612492 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-5bqts" event={"ID":"cc7a2f32-b05e-40f8-ac12-2af1f05a5578","Type":"ContainerStarted","Data":"520619a0b07b11dc908070ae48b728f4c90b570f75ab34c9b82e2f361d83b1b0"} Jan 29 12:12:18 crc kubenswrapper[4852]: I0129 12:12:18.641050 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-5bqts" podStartSLOduration=2.641024175 podStartE2EDuration="2.641024175s" podCreationTimestamp="2026-01-29 12:12:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:18.629622667 +0000 UTC m=+5435.846953831" watchObservedRunningTime="2026-01-29 12:12:18.641024175 +0000 UTC m=+5435.858355349" Jan 29 12:12:19 crc kubenswrapper[4852]: I0129 12:12:19.623757 4852 generic.go:334] "Generic (PLEG): container finished" podID="cc7a2f32-b05e-40f8-ac12-2af1f05a5578" containerID="94920d60be493663535dabd75b56284e4f46e8aee5e1fe34ebbca489e86c5542" exitCode=0 Jan 29 12:12:19 crc kubenswrapper[4852]: I0129 12:12:19.623834 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-5bqts" event={"ID":"cc7a2f32-b05e-40f8-ac12-2af1f05a5578","Type":"ContainerDied","Data":"94920d60be493663535dabd75b56284e4f46e8aee5e1fe34ebbca489e86c5542"} Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.020991 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.165505 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-combined-ca-bundle\") pod \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.166283 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-db-sync-config-data\") pod \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.166425 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4lzs\" (UniqueName: \"kubernetes.io/projected/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-kube-api-access-s4lzs\") pod \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\" (UID: \"cc7a2f32-b05e-40f8-ac12-2af1f05a5578\") " Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.184333 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-kube-api-access-s4lzs" (OuterVolumeSpecName: "kube-api-access-s4lzs") pod "cc7a2f32-b05e-40f8-ac12-2af1f05a5578" (UID: "cc7a2f32-b05e-40f8-ac12-2af1f05a5578"). InnerVolumeSpecName "kube-api-access-s4lzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.185059 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cc7a2f32-b05e-40f8-ac12-2af1f05a5578" (UID: "cc7a2f32-b05e-40f8-ac12-2af1f05a5578"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.208894 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc7a2f32-b05e-40f8-ac12-2af1f05a5578" (UID: "cc7a2f32-b05e-40f8-ac12-2af1f05a5578"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.268191 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4lzs\" (UniqueName: \"kubernetes.io/projected/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-kube-api-access-s4lzs\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.268226 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.268245 4852 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cc7a2f32-b05e-40f8-ac12-2af1f05a5578-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.643275 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-5bqts" event={"ID":"cc7a2f32-b05e-40f8-ac12-2af1f05a5578","Type":"ContainerDied","Data":"520619a0b07b11dc908070ae48b728f4c90b570f75ab34c9b82e2f361d83b1b0"} Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.643329 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="520619a0b07b11dc908070ae48b728f4c90b570f75ab34c9b82e2f361d83b1b0" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.643362 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-5bqts" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.918903 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5fff45d747-mx7dg"] Jan 29 12:12:21 crc kubenswrapper[4852]: E0129 12:12:21.919711 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc7a2f32-b05e-40f8-ac12-2af1f05a5578" containerName="barbican-db-sync" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.919737 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc7a2f32-b05e-40f8-ac12-2af1f05a5578" containerName="barbican-db-sync" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.919964 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc7a2f32-b05e-40f8-ac12-2af1f05a5578" containerName="barbican-db-sync" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.921110 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.933140 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-l6ph5" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.933456 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.937318 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.970191 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5fff45d747-mx7dg"] Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.988700 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-cddbf6bbd-2jh9z"] Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.990578 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:21 crc kubenswrapper[4852]: I0129 12:12:21.992890 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.033885 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-cddbf6bbd-2jh9z"] Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.081009 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f5f9749f7-dhqbv"] Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.082711 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.091780 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-combined-ca-bundle\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.091988 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-combined-ca-bundle\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092096 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-config-data-custom\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092202 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a17de25-6f02-4f68-88c8-36d3c1450821-logs\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092278 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d7f137b-b458-4df9-9c45-09c744fa6362-logs\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092365 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdwxn\" (UniqueName: \"kubernetes.io/projected/8a17de25-6f02-4f68-88c8-36d3c1450821-kube-api-access-xdwxn\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092436 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-config-data-custom\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092543 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-config-data\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092735 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-config-data\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.092835 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csltj\" (UniqueName: \"kubernetes.io/projected/6d7f137b-b458-4df9-9c45-09c744fa6362-kube-api-access-csltj\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.142326 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f5f9749f7-dhqbv"] Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.165866 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6ffccc7596-b485b"] Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.168306 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.176537 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6ffccc7596-b485b"] Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.182094 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198216 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a17de25-6f02-4f68-88c8-36d3c1450821-logs\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d7f137b-b458-4df9-9c45-09c744fa6362-logs\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198310 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-nb\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198350 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdwxn\" (UniqueName: \"kubernetes.io/projected/8a17de25-6f02-4f68-88c8-36d3c1450821-kube-api-access-xdwxn\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198377 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-config-data-custom\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198403 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-dns-svc\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198727 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-config-data\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198839 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-config-data\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: 
\"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198908 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csltj\" (UniqueName: \"kubernetes.io/projected/6d7f137b-b458-4df9-9c45-09c744fa6362-kube-api-access-csltj\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.198944 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d7f137b-b458-4df9-9c45-09c744fa6362-logs\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.199001 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-combined-ca-bundle\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.199026 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p96s7\" (UniqueName: \"kubernetes.io/projected/466b49df-9dc2-40a9-ba14-24b6915979f1-kube-api-access-p96s7\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.199077 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-combined-ca-bundle\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.199100 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-config-data-custom\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.199122 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-config\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.199140 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-sb\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.207822 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-combined-ca-bundle\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.208625 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-config-data-custom\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.209079 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a17de25-6f02-4f68-88c8-36d3c1450821-config-data\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.213499 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-combined-ca-bundle\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.223985 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-config-data-custom\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.225403 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a17de25-6f02-4f68-88c8-36d3c1450821-logs\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.226312 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csltj\" (UniqueName: \"kubernetes.io/projected/6d7f137b-b458-4df9-9c45-09c744fa6362-kube-api-access-csltj\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.233643 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d7f137b-b458-4df9-9c45-09c744fa6362-config-data\") pod \"barbican-worker-5fff45d747-mx7dg\" (UID: \"6d7f137b-b458-4df9-9c45-09c744fa6362\") " pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.242165 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdwxn\" (UniqueName: \"kubernetes.io/projected/8a17de25-6f02-4f68-88c8-36d3c1450821-kube-api-access-xdwxn\") pod \"barbican-keystone-listener-cddbf6bbd-2jh9z\" (UID: \"8a17de25-6f02-4f68-88c8-36d3c1450821\") " pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.271266 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5fff45d747-mx7dg" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.304930 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-config\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.304996 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-sb\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305069 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-nb\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305111 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-dns-svc\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305183 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-combined-ca-bundle\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305251 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-config-data\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305286 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjg25\" (UniqueName: \"kubernetes.io/projected/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-kube-api-access-fjg25\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305348 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p96s7\" (UniqueName: \"kubernetes.io/projected/466b49df-9dc2-40a9-ba14-24b6915979f1-kube-api-access-p96s7\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305376 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-logs\") pod \"barbican-api-6ffccc7596-b485b\" (UID: 
\"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.305400 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-config-data-custom\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.306373 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-config\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.307629 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-sb\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.308163 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-nb\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.315760 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-dns-svc\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.327806 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.332517 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p96s7\" (UniqueName: \"kubernetes.io/projected/466b49df-9dc2-40a9-ba14-24b6915979f1-kube-api-access-p96s7\") pod \"dnsmasq-dns-5f5f9749f7-dhqbv\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.406603 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-logs\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.407051 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-config-data-custom\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.407133 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-logs\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.407205 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-combined-ca-bundle\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.407304 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-config-data\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.407334 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjg25\" (UniqueName: \"kubernetes.io/projected/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-kube-api-access-fjg25\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.411451 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-combined-ca-bundle\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.413034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-config-data-custom\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " 
pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.417163 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-config-data\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.423767 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.428618 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjg25\" (UniqueName: \"kubernetes.io/projected/2c7bcc2b-3201-4828-9f3a-44174ff6b77e-kube-api-access-fjg25\") pod \"barbican-api-6ffccc7596-b485b\" (UID: \"2c7bcc2b-3201-4828-9f3a-44174ff6b77e\") " pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.490827 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.866688 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5fff45d747-mx7dg"] Jan 29 12:12:22 crc kubenswrapper[4852]: W0129 12:12:22.871594 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a17de25_6f02_4f68_88c8_36d3c1450821.slice/crio-b848ca6d6927f0d2413242649d746e3c6bc6b9e3cf2d6202fa623719f35145c9 WatchSource:0}: Error finding container b848ca6d6927f0d2413242649d746e3c6bc6b9e3cf2d6202fa623719f35145c9: Status 404 returned error can't find the container with id b848ca6d6927f0d2413242649d746e3c6bc6b9e3cf2d6202fa623719f35145c9 Jan 29 12:12:22 crc kubenswrapper[4852]: I0129 12:12:22.873686 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-cddbf6bbd-2jh9z"] Jan 29 12:12:22 crc kubenswrapper[4852]: W0129 12:12:22.880010 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d7f137b_b458_4df9_9c45_09c744fa6362.slice/crio-35455754ce255357cb1baf2e525c705f81d817a20328a1b7d91849fd959477d4 WatchSource:0}: Error finding container 35455754ce255357cb1baf2e525c705f81d817a20328a1b7d91849fd959477d4: Status 404 returned error can't find the container with id 35455754ce255357cb1baf2e525c705f81d817a20328a1b7d91849fd959477d4 Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.098220 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f5f9749f7-dhqbv"] Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.162348 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6ffccc7596-b485b"] Jan 29 12:12:23 crc kubenswrapper[4852]: W0129 12:12:23.170285 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c7bcc2b_3201_4828_9f3a_44174ff6b77e.slice/crio-40ccc01ce96b3c8360bfe7521736994d9ebd79979e24eb890665aed5d6f9ef1a WatchSource:0}: Error finding container 40ccc01ce96b3c8360bfe7521736994d9ebd79979e24eb890665aed5d6f9ef1a: Status 404 returned error can't find the container with id 40ccc01ce96b3c8360bfe7521736994d9ebd79979e24eb890665aed5d6f9ef1a Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.660926 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ffccc7596-b485b" event={"ID":"2c7bcc2b-3201-4828-9f3a-44174ff6b77e","Type":"ContainerStarted","Data":"cd9c28ca00ecd7eae0857c50f96ac65a1fb2deb0e9e52b773892647a439d527e"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.661307 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ffccc7596-b485b" event={"ID":"2c7bcc2b-3201-4828-9f3a-44174ff6b77e","Type":"ContainerStarted","Data":"40ccc01ce96b3c8360bfe7521736994d9ebd79979e24eb890665aed5d6f9ef1a"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.663421 4852 generic.go:334] "Generic (PLEG): container finished" podID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerID="ebdfe8c19394619c2f974a6d561653aeabfb6290bbdf479fbac2ab9b7560771a" exitCode=0 Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.663474 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" event={"ID":"466b49df-9dc2-40a9-ba14-24b6915979f1","Type":"ContainerDied","Data":"ebdfe8c19394619c2f974a6d561653aeabfb6290bbdf479fbac2ab9b7560771a"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.663507 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" event={"ID":"466b49df-9dc2-40a9-ba14-24b6915979f1","Type":"ContainerStarted","Data":"58adc6303b8c4a2a668f62e200105180d720cda7f06b4e85883e859185e4f030"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.665517 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5fff45d747-mx7dg" event={"ID":"6d7f137b-b458-4df9-9c45-09c744fa6362","Type":"ContainerStarted","Data":"748f211406f8fafd138bb237356f2728c42c82a1bc86b7c17bca1375f1fbbd9b"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.665569 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5fff45d747-mx7dg" event={"ID":"6d7f137b-b458-4df9-9c45-09c744fa6362","Type":"ContainerStarted","Data":"6fd802075d090ac102e1cc3846543143d5fd532eccf8878d164ffca945bb8d56"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.665622 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5fff45d747-mx7dg" event={"ID":"6d7f137b-b458-4df9-9c45-09c744fa6362","Type":"ContainerStarted","Data":"35455754ce255357cb1baf2e525c705f81d817a20328a1b7d91849fd959477d4"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.670515 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" event={"ID":"8a17de25-6f02-4f68-88c8-36d3c1450821","Type":"ContainerStarted","Data":"043fe9be9ac17284c194fcd54c4c4c1daa69b567bceb062caf83b3f3eb812087"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.670544 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" event={"ID":"8a17de25-6f02-4f68-88c8-36d3c1450821","Type":"ContainerStarted","Data":"9206677b3adcff257e437768b54e35d4935c32f8a8ce7d931db23b9172ffc059"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.670554 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" event={"ID":"8a17de25-6f02-4f68-88c8-36d3c1450821","Type":"ContainerStarted","Data":"b848ca6d6927f0d2413242649d746e3c6bc6b9e3cf2d6202fa623719f35145c9"} Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.730448 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/barbican-keystone-listener-cddbf6bbd-2jh9z" podStartSLOduration=2.730426143 podStartE2EDuration="2.730426143s" podCreationTimestamp="2026-01-29 12:12:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:23.72540394 +0000 UTC m=+5440.942735074" watchObservedRunningTime="2026-01-29 12:12:23.730426143 +0000 UTC m=+5440.947757267" Jan 29 12:12:23 crc kubenswrapper[4852]: I0129 12:12:23.763900 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5fff45d747-mx7dg" podStartSLOduration=2.7638827790000002 podStartE2EDuration="2.763882779s" podCreationTimestamp="2026-01-29 12:12:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:23.761797987 +0000 UTC m=+5440.979129121" watchObservedRunningTime="2026-01-29 12:12:23.763882779 +0000 UTC m=+5440.981213913" Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.681824 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6ffccc7596-b485b" event={"ID":"2c7bcc2b-3201-4828-9f3a-44174ff6b77e","Type":"ContainerStarted","Data":"91f32bd146b5254e6f3812ed8fac156e1088819285cc72b841d499c4621416f0"} Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.683701 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.683930 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.689610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" event={"ID":"466b49df-9dc2-40a9-ba14-24b6915979f1","Type":"ContainerStarted","Data":"0e39eeb2e40d22ab3f537a6b4e07ef101b7a7bc667cf046527da36f94b14f278"} Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.689722 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.742595 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6ffccc7596-b485b" podStartSLOduration=2.742558544 podStartE2EDuration="2.742558544s" podCreationTimestamp="2026-01-29 12:12:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:24.711860455 +0000 UTC m=+5441.929191639" watchObservedRunningTime="2026-01-29 12:12:24.742558544 +0000 UTC m=+5441.959889678" Jan 29 12:12:24 crc kubenswrapper[4852]: I0129 12:12:24.744076 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" podStartSLOduration=3.7440659910000003 podStartE2EDuration="3.744065991s" podCreationTimestamp="2026-01-29 12:12:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:24.735299136 +0000 UTC m=+5441.952630270" watchObservedRunningTime="2026-01-29 12:12:24.744065991 +0000 UTC m=+5441.961397125" Jan 29 12:12:30 crc kubenswrapper[4852]: I0129 12:12:30.017971 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:12:30 crc kubenswrapper[4852]: I0129 12:12:30.018826 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:12:32 crc kubenswrapper[4852]: I0129 12:12:32.425843 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:12:32 crc kubenswrapper[4852]: I0129 12:12:32.511075 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67b648cdf7-qc6g4"] Jan 29 12:12:32 crc kubenswrapper[4852]: I0129 12:12:32.512003 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerName="dnsmasq-dns" containerID="cri-o://0363d19a7740b0482ef69695a7e3d8a0cc62df4729b16d8e69aa603f1f3f57ad" gracePeriod=10 Jan 29 12:12:32 crc kubenswrapper[4852]: I0129 12:12:32.772773 4852 generic.go:334] "Generic (PLEG): container finished" podID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerID="0363d19a7740b0482ef69695a7e3d8a0cc62df4729b16d8e69aa603f1f3f57ad" exitCode=0 Jan 29 12:12:32 crc kubenswrapper[4852]: I0129 12:12:32.772821 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" event={"ID":"c4728eef-3af4-42ef-bfae-110f0bbc61a6","Type":"ContainerDied","Data":"0363d19a7740b0482ef69695a7e3d8a0cc62df4729b16d8e69aa603f1f3f57ad"} Jan 29 12:12:32 crc kubenswrapper[4852]: I0129 12:12:32.991793 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.061274 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-config\") pod \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.061683 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-nb\") pod \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.061719 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-sb\") pod \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.061747 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-dns-svc\") pod \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.061865 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh6fg\" (UniqueName: \"kubernetes.io/projected/c4728eef-3af4-42ef-bfae-110f0bbc61a6-kube-api-access-xh6fg\") pod \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\" (UID: \"c4728eef-3af4-42ef-bfae-110f0bbc61a6\") " Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.085235 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4728eef-3af4-42ef-bfae-110f0bbc61a6-kube-api-access-xh6fg" (OuterVolumeSpecName: "kube-api-access-xh6fg") pod "c4728eef-3af4-42ef-bfae-110f0bbc61a6" (UID: "c4728eef-3af4-42ef-bfae-110f0bbc61a6"). InnerVolumeSpecName "kube-api-access-xh6fg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.110558 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-config" (OuterVolumeSpecName: "config") pod "c4728eef-3af4-42ef-bfae-110f0bbc61a6" (UID: "c4728eef-3af4-42ef-bfae-110f0bbc61a6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.113323 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c4728eef-3af4-42ef-bfae-110f0bbc61a6" (UID: "c4728eef-3af4-42ef-bfae-110f0bbc61a6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.117604 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c4728eef-3af4-42ef-bfae-110f0bbc61a6" (UID: "c4728eef-3af4-42ef-bfae-110f0bbc61a6"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.117657 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c4728eef-3af4-42ef-bfae-110f0bbc61a6" (UID: "c4728eef-3af4-42ef-bfae-110f0bbc61a6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.167809 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.167850 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.167860 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.167871 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh6fg\" (UniqueName: \"kubernetes.io/projected/c4728eef-3af4-42ef-bfae-110f0bbc61a6-kube-api-access-xh6fg\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.167881 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4728eef-3af4-42ef-bfae-110f0bbc61a6-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.786834 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" event={"ID":"c4728eef-3af4-42ef-bfae-110f0bbc61a6","Type":"ContainerDied","Data":"c1a7dc38e74aa140c51b1a792dd29907997efad2b99f9ecb97d3600d3641548f"} Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.786890 4852 scope.go:117] "RemoveContainer" containerID="0363d19a7740b0482ef69695a7e3d8a0cc62df4729b16d8e69aa603f1f3f57ad" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.787069 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67b648cdf7-qc6g4" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.816481 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67b648cdf7-qc6g4"] Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.820513 4852 scope.go:117] "RemoveContainer" containerID="33b090e25cf31e848b47afb095ceea8ef307dc3de87a67ea6222b39387157639" Jan 29 12:12:33 crc kubenswrapper[4852]: I0129 12:12:33.824210 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67b648cdf7-qc6g4"] Jan 29 12:12:34 crc kubenswrapper[4852]: I0129 12:12:34.188402 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:34 crc kubenswrapper[4852]: I0129 12:12:34.229305 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6ffccc7596-b485b" Jan 29 12:12:35 crc kubenswrapper[4852]: I0129 12:12:35.490352 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" path="/var/lib/kubelet/pods/c4728eef-3af4-42ef-bfae-110f0bbc61a6/volumes" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.162009 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-27mtn"] Jan 29 12:12:46 crc kubenswrapper[4852]: E0129 12:12:46.166610 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerName="init" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.166660 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerName="init" Jan 29 12:12:46 crc kubenswrapper[4852]: E0129 12:12:46.166697 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerName="dnsmasq-dns" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.166707 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerName="dnsmasq-dns" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.170895 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4728eef-3af4-42ef-bfae-110f0bbc61a6" containerName="dnsmasq-dns" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.171735 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.178618 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-27mtn"] Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.257472 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-3e22-account-create-update-6xdwl"] Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.258479 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.267678 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.273742 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3e22-account-create-update-6xdwl"] Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.300254 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vlkx\" (UniqueName: \"kubernetes.io/projected/0ac2a1fa-4357-4674-92d2-2751732311a2-kube-api-access-7vlkx\") pod \"neutron-db-create-27mtn\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.300359 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ac2a1fa-4357-4674-92d2-2751732311a2-operator-scripts\") pod \"neutron-db-create-27mtn\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.402072 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ac2a1fa-4357-4674-92d2-2751732311a2-operator-scripts\") pod \"neutron-db-create-27mtn\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.402192 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hhvc\" (UniqueName: \"kubernetes.io/projected/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-kube-api-access-6hhvc\") pod \"neutron-3e22-account-create-update-6xdwl\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.402270 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vlkx\" (UniqueName: \"kubernetes.io/projected/0ac2a1fa-4357-4674-92d2-2751732311a2-kube-api-access-7vlkx\") pod \"neutron-db-create-27mtn\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.402526 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-operator-scripts\") pod \"neutron-3e22-account-create-update-6xdwl\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.403424 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ac2a1fa-4357-4674-92d2-2751732311a2-operator-scripts\") pod \"neutron-db-create-27mtn\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.426529 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vlkx\" (UniqueName: \"kubernetes.io/projected/0ac2a1fa-4357-4674-92d2-2751732311a2-kube-api-access-7vlkx\") pod 
\"neutron-db-create-27mtn\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.504069 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hhvc\" (UniqueName: \"kubernetes.io/projected/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-kube-api-access-6hhvc\") pod \"neutron-3e22-account-create-update-6xdwl\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.504172 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-operator-scripts\") pod \"neutron-3e22-account-create-update-6xdwl\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.504934 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-operator-scripts\") pod \"neutron-3e22-account-create-update-6xdwl\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.523210 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hhvc\" (UniqueName: \"kubernetes.io/projected/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-kube-api-access-6hhvc\") pod \"neutron-3e22-account-create-update-6xdwl\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.541998 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.574330 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.841204 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-27mtn"] Jan 29 12:12:46 crc kubenswrapper[4852]: W0129 12:12:46.853516 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ac2a1fa_4357_4674_92d2_2751732311a2.slice/crio-84f6cd262f501f8aeaf272532bfdffc0abcc54183d4b147cd014114e0385a1ac WatchSource:0}: Error finding container 84f6cd262f501f8aeaf272532bfdffc0abcc54183d4b147cd014114e0385a1ac: Status 404 returned error can't find the container with id 84f6cd262f501f8aeaf272532bfdffc0abcc54183d4b147cd014114e0385a1ac Jan 29 12:12:46 crc kubenswrapper[4852]: I0129 12:12:46.931458 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-27mtn" event={"ID":"0ac2a1fa-4357-4674-92d2-2751732311a2","Type":"ContainerStarted","Data":"84f6cd262f501f8aeaf272532bfdffc0abcc54183d4b147cd014114e0385a1ac"} Jan 29 12:12:47 crc kubenswrapper[4852]: I0129 12:12:47.127634 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3e22-account-create-update-6xdwl"] Jan 29 12:12:47 crc kubenswrapper[4852]: W0129 12:12:47.136955 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ddaf8fe_ecdb_47c0_9d25_2bef85eb9ab5.slice/crio-35ab8ec6b80c518b372965050dc500e149ceac475a2fc6c14acf9777976da75f WatchSource:0}: Error finding container 35ab8ec6b80c518b372965050dc500e149ceac475a2fc6c14acf9777976da75f: Status 404 returned error can't find the container with id 35ab8ec6b80c518b372965050dc500e149ceac475a2fc6c14acf9777976da75f Jan 29 12:12:47 crc kubenswrapper[4852]: I0129 12:12:47.942861 4852 generic.go:334] "Generic (PLEG): container finished" podID="7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" containerID="4f6e10e7616a7b8147899e25afb944346027559244b495315c5d5efbf38532ee" exitCode=0 Jan 29 12:12:47 crc kubenswrapper[4852]: I0129 12:12:47.943046 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3e22-account-create-update-6xdwl" event={"ID":"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5","Type":"ContainerDied","Data":"4f6e10e7616a7b8147899e25afb944346027559244b495315c5d5efbf38532ee"} Jan 29 12:12:47 crc kubenswrapper[4852]: I0129 12:12:47.943147 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3e22-account-create-update-6xdwl" event={"ID":"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5","Type":"ContainerStarted","Data":"35ab8ec6b80c518b372965050dc500e149ceac475a2fc6c14acf9777976da75f"} Jan 29 12:12:47 crc kubenswrapper[4852]: I0129 12:12:47.945084 4852 generic.go:334] "Generic (PLEG): container finished" podID="0ac2a1fa-4357-4674-92d2-2751732311a2" containerID="e968797c0c4e727e777729bc0e355968cd2222a97910fb455bb9d034f94731a0" exitCode=0 Jan 29 12:12:47 crc kubenswrapper[4852]: I0129 12:12:47.945144 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-27mtn" event={"ID":"0ac2a1fa-4357-4674-92d2-2751732311a2","Type":"ContainerDied","Data":"e968797c0c4e727e777729bc0e355968cd2222a97910fb455bb9d034f94731a0"} Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.380082 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.385516 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.462856 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ac2a1fa-4357-4674-92d2-2751732311a2-operator-scripts\") pod \"0ac2a1fa-4357-4674-92d2-2751732311a2\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.462983 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vlkx\" (UniqueName: \"kubernetes.io/projected/0ac2a1fa-4357-4674-92d2-2751732311a2-kube-api-access-7vlkx\") pod \"0ac2a1fa-4357-4674-92d2-2751732311a2\" (UID: \"0ac2a1fa-4357-4674-92d2-2751732311a2\") " Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.463911 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ac2a1fa-4357-4674-92d2-2751732311a2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0ac2a1fa-4357-4674-92d2-2751732311a2" (UID: "0ac2a1fa-4357-4674-92d2-2751732311a2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.467842 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ac2a1fa-4357-4674-92d2-2751732311a2-kube-api-access-7vlkx" (OuterVolumeSpecName: "kube-api-access-7vlkx") pod "0ac2a1fa-4357-4674-92d2-2751732311a2" (UID: "0ac2a1fa-4357-4674-92d2-2751732311a2"). InnerVolumeSpecName "kube-api-access-7vlkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.539474 4852 scope.go:117] "RemoveContainer" containerID="7ed555839cc90fb828afcf1fc2303d6ffb0c74ae734b3c086768b4f564f3e551" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.564969 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-operator-scripts\") pod \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.565036 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hhvc\" (UniqueName: \"kubernetes.io/projected/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-kube-api-access-6hhvc\") pod \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\" (UID: \"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5\") " Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.565600 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" (UID: "7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.565713 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vlkx\" (UniqueName: \"kubernetes.io/projected/0ac2a1fa-4357-4674-92d2-2751732311a2-kube-api-access-7vlkx\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.565737 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ac2a1fa-4357-4674-92d2-2751732311a2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.565749 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.568484 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-kube-api-access-6hhvc" (OuterVolumeSpecName: "kube-api-access-6hhvc") pod "7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" (UID: "7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5"). InnerVolumeSpecName "kube-api-access-6hhvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.667094 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hhvc\" (UniqueName: \"kubernetes.io/projected/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5-kube-api-access-6hhvc\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.965535 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3e22-account-create-update-6xdwl" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.965569 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3e22-account-create-update-6xdwl" event={"ID":"7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5","Type":"ContainerDied","Data":"35ab8ec6b80c518b372965050dc500e149ceac475a2fc6c14acf9777976da75f"} Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.965642 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35ab8ec6b80c518b372965050dc500e149ceac475a2fc6c14acf9777976da75f" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.968735 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-27mtn" event={"ID":"0ac2a1fa-4357-4674-92d2-2751732311a2","Type":"ContainerDied","Data":"84f6cd262f501f8aeaf272532bfdffc0abcc54183d4b147cd014114e0385a1ac"} Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.968773 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84f6cd262f501f8aeaf272532bfdffc0abcc54183d4b147cd014114e0385a1ac" Jan 29 12:12:49 crc kubenswrapper[4852]: I0129 12:12:49.968839 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-27mtn" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.519245 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-9gsbw"] Jan 29 12:12:51 crc kubenswrapper[4852]: E0129 12:12:51.519661 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ac2a1fa-4357-4674-92d2-2751732311a2" containerName="mariadb-database-create" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.519676 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ac2a1fa-4357-4674-92d2-2751732311a2" containerName="mariadb-database-create" Jan 29 12:12:51 crc kubenswrapper[4852]: E0129 12:12:51.519711 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" containerName="mariadb-account-create-update" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.519720 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" containerName="mariadb-account-create-update" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.519909 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" containerName="mariadb-account-create-update" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.519934 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ac2a1fa-4357-4674-92d2-2751732311a2" containerName="mariadb-database-create" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.520463 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.526792 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.526931 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j9lqc" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.528662 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.547153 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-9gsbw"] Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.603649 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd8cr\" (UniqueName: \"kubernetes.io/projected/32686705-bf27-4ee5-9d96-39e17e9512a3-kube-api-access-pd8cr\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.603734 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-combined-ca-bundle\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.603840 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-config\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 
12:12:51.705328 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd8cr\" (UniqueName: \"kubernetes.io/projected/32686705-bf27-4ee5-9d96-39e17e9512a3-kube-api-access-pd8cr\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.705411 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-combined-ca-bundle\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.705490 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-config\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.716868 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-combined-ca-bundle\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.718971 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-config\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.722403 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd8cr\" (UniqueName: \"kubernetes.io/projected/32686705-bf27-4ee5-9d96-39e17e9512a3-kube-api-access-pd8cr\") pod \"neutron-db-sync-9gsbw\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:51 crc kubenswrapper[4852]: I0129 12:12:51.890436 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:12:52 crc kubenswrapper[4852]: I0129 12:12:52.391956 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-9gsbw"] Jan 29 12:12:52 crc kubenswrapper[4852]: I0129 12:12:52.996335 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9gsbw" event={"ID":"32686705-bf27-4ee5-9d96-39e17e9512a3","Type":"ContainerStarted","Data":"4df0cd6423b578dfa032b8bbf476910d03cf16e621f8ddbb5016711471c56149"} Jan 29 12:12:52 crc kubenswrapper[4852]: I0129 12:12:52.996680 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9gsbw" event={"ID":"32686705-bf27-4ee5-9d96-39e17e9512a3","Type":"ContainerStarted","Data":"52db544938e2320deb39a1610deedbefef58cf591d08806c91c4ff64f8d69d36"} Jan 29 12:12:53 crc kubenswrapper[4852]: I0129 12:12:53.017072 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-9gsbw" podStartSLOduration=2.017049486 podStartE2EDuration="2.017049486s" podCreationTimestamp="2026-01-29 12:12:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:53.012471324 +0000 UTC m=+5470.229802448" watchObservedRunningTime="2026-01-29 12:12:53.017049486 +0000 UTC m=+5470.234380620" Jan 29 12:12:59 crc kubenswrapper[4852]: I0129 12:12:59.046355 4852 generic.go:334] "Generic (PLEG): container finished" podID="32686705-bf27-4ee5-9d96-39e17e9512a3" containerID="4df0cd6423b578dfa032b8bbf476910d03cf16e621f8ddbb5016711471c56149" exitCode=0 Jan 29 12:12:59 crc kubenswrapper[4852]: I0129 12:12:59.046438 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9gsbw" event={"ID":"32686705-bf27-4ee5-9d96-39e17e9512a3","Type":"ContainerDied","Data":"4df0cd6423b578dfa032b8bbf476910d03cf16e621f8ddbb5016711471c56149"} Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.016896 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.016945 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.397781 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.478980 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd8cr\" (UniqueName: \"kubernetes.io/projected/32686705-bf27-4ee5-9d96-39e17e9512a3-kube-api-access-pd8cr\") pod \"32686705-bf27-4ee5-9d96-39e17e9512a3\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.479159 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-combined-ca-bundle\") pod \"32686705-bf27-4ee5-9d96-39e17e9512a3\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.479189 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-config\") pod \"32686705-bf27-4ee5-9d96-39e17e9512a3\" (UID: \"32686705-bf27-4ee5-9d96-39e17e9512a3\") " Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.485435 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32686705-bf27-4ee5-9d96-39e17e9512a3-kube-api-access-pd8cr" (OuterVolumeSpecName: "kube-api-access-pd8cr") pod "32686705-bf27-4ee5-9d96-39e17e9512a3" (UID: "32686705-bf27-4ee5-9d96-39e17e9512a3"). InnerVolumeSpecName "kube-api-access-pd8cr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.505019 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-config" (OuterVolumeSpecName: "config") pod "32686705-bf27-4ee5-9d96-39e17e9512a3" (UID: "32686705-bf27-4ee5-9d96-39e17e9512a3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.512834 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32686705-bf27-4ee5-9d96-39e17e9512a3" (UID: "32686705-bf27-4ee5-9d96-39e17e9512a3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.581775 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd8cr\" (UniqueName: \"kubernetes.io/projected/32686705-bf27-4ee5-9d96-39e17e9512a3-kube-api-access-pd8cr\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.581815 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:00 crc kubenswrapper[4852]: I0129 12:13:00.581828 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/32686705-bf27-4ee5-9d96-39e17e9512a3-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.067972 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9gsbw" event={"ID":"32686705-bf27-4ee5-9d96-39e17e9512a3","Type":"ContainerDied","Data":"52db544938e2320deb39a1610deedbefef58cf591d08806c91c4ff64f8d69d36"} Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.068289 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52db544938e2320deb39a1610deedbefef58cf591d08806c91c4ff64f8d69d36" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.068338 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-9gsbw" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.242236 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-774d9c6bc7-vhqpn"] Jan 29 12:13:01 crc kubenswrapper[4852]: E0129 12:13:01.242577 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32686705-bf27-4ee5-9d96-39e17e9512a3" containerName="neutron-db-sync" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.242609 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="32686705-bf27-4ee5-9d96-39e17e9512a3" containerName="neutron-db-sync" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.242803 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="32686705-bf27-4ee5-9d96-39e17e9512a3" containerName="neutron-db-sync" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.245813 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.275275 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-774d9c6bc7-vhqpn"] Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.349084 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-65cc64b9b9-ppj6r"] Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.351115 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.353477 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j9lqc" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.353936 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.354052 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.375441 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-65cc64b9b9-ppj6r"] Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.397290 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-config\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.397359 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-sb\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.397409 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-dns-svc\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.397479 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk94k\" (UniqueName: \"kubernetes.io/projected/bd9accf7-0c08-4859-871b-5251747e0edb-kube-api-access-fk94k\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.397508 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-nb\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.498958 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-httpd-config\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499030 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-config\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc 
kubenswrapper[4852]: I0129 12:13:01.499073 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-sb\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499121 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-dns-svc\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499177 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-combined-ca-bundle\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499213 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-config\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499266 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk94k\" (UniqueName: \"kubernetes.io/projected/bd9accf7-0c08-4859-871b-5251747e0edb-kube-api-access-fk94k\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499293 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-nb\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.499336 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sftgr\" (UniqueName: \"kubernetes.io/projected/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-kube-api-access-sftgr\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.500144 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-sb\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.500411 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-config\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.500419 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-dns-svc\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.500525 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-nb\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.516456 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk94k\" (UniqueName: \"kubernetes.io/projected/bd9accf7-0c08-4859-871b-5251747e0edb-kube-api-access-fk94k\") pod \"dnsmasq-dns-774d9c6bc7-vhqpn\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.573287 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.601149 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-combined-ca-bundle\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.601208 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-config\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.601300 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sftgr\" (UniqueName: \"kubernetes.io/projected/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-kube-api-access-sftgr\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.601383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-httpd-config\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.609894 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-httpd-config\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.610309 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-combined-ca-bundle\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc 
kubenswrapper[4852]: I0129 12:13:01.612426 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-config\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.625964 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sftgr\" (UniqueName: \"kubernetes.io/projected/38a3afdc-64d5-4c8b-ae4f-87d376c11ad1-kube-api-access-sftgr\") pod \"neutron-65cc64b9b9-ppj6r\" (UID: \"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1\") " pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:01 crc kubenswrapper[4852]: I0129 12:13:01.674786 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:02 crc kubenswrapper[4852]: I0129 12:13:02.139366 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-774d9c6bc7-vhqpn"] Jan 29 12:13:02 crc kubenswrapper[4852]: W0129 12:13:02.156785 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd9accf7_0c08_4859_871b_5251747e0edb.slice/crio-ddbb6ec743a0c14973ac2036a568726c52cadea1cce729010006fc6f19286e56 WatchSource:0}: Error finding container ddbb6ec743a0c14973ac2036a568726c52cadea1cce729010006fc6f19286e56: Status 404 returned error can't find the container with id ddbb6ec743a0c14973ac2036a568726c52cadea1cce729010006fc6f19286e56 Jan 29 12:13:02 crc kubenswrapper[4852]: I0129 12:13:02.314981 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-65cc64b9b9-ppj6r"] Jan 29 12:13:02 crc kubenswrapper[4852]: W0129 12:13:02.325515 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38a3afdc_64d5_4c8b_ae4f_87d376c11ad1.slice/crio-00d0f1fb31b1c344c7e19592fadf4e492854ae4bb6c7b5e4437e6b9cd82c1abe WatchSource:0}: Error finding container 00d0f1fb31b1c344c7e19592fadf4e492854ae4bb6c7b5e4437e6b9cd82c1abe: Status 404 returned error can't find the container with id 00d0f1fb31b1c344c7e19592fadf4e492854ae4bb6c7b5e4437e6b9cd82c1abe Jan 29 12:13:03 crc kubenswrapper[4852]: I0129 12:13:03.092066 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65cc64b9b9-ppj6r" event={"ID":"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1","Type":"ContainerStarted","Data":"5bee3c11045fbc38c1bdde86aaafa7c1e00cd21e685d362b5bdc4306a7cb5fc9"} Jan 29 12:13:03 crc kubenswrapper[4852]: I0129 12:13:03.092625 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65cc64b9b9-ppj6r" event={"ID":"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1","Type":"ContainerStarted","Data":"00d0f1fb31b1c344c7e19592fadf4e492854ae4bb6c7b5e4437e6b9cd82c1abe"} Jan 29 12:13:03 crc kubenswrapper[4852]: I0129 12:13:03.093397 4852 generic.go:334] "Generic (PLEG): container finished" podID="bd9accf7-0c08-4859-871b-5251747e0edb" containerID="d37997caba7afa88605b4003687b01569d4a04315e8d1dcafd6d2f75549d2660" exitCode=0 Jan 29 12:13:03 crc kubenswrapper[4852]: I0129 12:13:03.093424 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" event={"ID":"bd9accf7-0c08-4859-871b-5251747e0edb","Type":"ContainerDied","Data":"d37997caba7afa88605b4003687b01569d4a04315e8d1dcafd6d2f75549d2660"} Jan 29 12:13:03 crc kubenswrapper[4852]: I0129 
12:13:03.093444 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" event={"ID":"bd9accf7-0c08-4859-871b-5251747e0edb","Type":"ContainerStarted","Data":"ddbb6ec743a0c14973ac2036a568726c52cadea1cce729010006fc6f19286e56"} Jan 29 12:13:04 crc kubenswrapper[4852]: I0129 12:13:04.106572 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" event={"ID":"bd9accf7-0c08-4859-871b-5251747e0edb","Type":"ContainerStarted","Data":"2849f04a8e34508e1df10a2a6ca28cfd4152242f9b10d8b90881fe966fba6806"} Jan 29 12:13:04 crc kubenswrapper[4852]: I0129 12:13:04.107227 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:04 crc kubenswrapper[4852]: I0129 12:13:04.109274 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65cc64b9b9-ppj6r" event={"ID":"38a3afdc-64d5-4c8b-ae4f-87d376c11ad1","Type":"ContainerStarted","Data":"5d48fc63409ab57f2ca8da264a7621876e15d783f198c2c2139c16d4bd920691"} Jan 29 12:13:04 crc kubenswrapper[4852]: I0129 12:13:04.109439 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:04 crc kubenswrapper[4852]: I0129 12:13:04.126999 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" podStartSLOduration=3.126975529 podStartE2EDuration="3.126975529s" podCreationTimestamp="2026-01-29 12:13:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:13:04.125347808 +0000 UTC m=+5481.342678972" watchObservedRunningTime="2026-01-29 12:13:04.126975529 +0000 UTC m=+5481.344306703" Jan 29 12:13:04 crc kubenswrapper[4852]: I0129 12:13:04.151151 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-65cc64b9b9-ppj6r" podStartSLOduration=3.151125597 podStartE2EDuration="3.151125597s" podCreationTimestamp="2026-01-29 12:13:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:13:04.1459354 +0000 UTC m=+5481.363266584" watchObservedRunningTime="2026-01-29 12:13:04.151125597 +0000 UTC m=+5481.368456741" Jan 29 12:13:11 crc kubenswrapper[4852]: I0129 12:13:11.575610 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:13:11 crc kubenswrapper[4852]: I0129 12:13:11.662847 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f5f9749f7-dhqbv"] Jan 29 12:13:11 crc kubenswrapper[4852]: I0129 12:13:11.663094 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="dnsmasq-dns" containerID="cri-o://0e39eeb2e40d22ab3f537a6b4e07ef101b7a7bc667cf046527da36f94b14f278" gracePeriod=10 Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.215134 4852 generic.go:334] "Generic (PLEG): container finished" podID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerID="0e39eeb2e40d22ab3f537a6b4e07ef101b7a7bc667cf046527da36f94b14f278" exitCode=0 Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.215225 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" 
event={"ID":"466b49df-9dc2-40a9-ba14-24b6915979f1","Type":"ContainerDied","Data":"0e39eeb2e40d22ab3f537a6b4e07ef101b7a7bc667cf046527da36f94b14f278"} Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.425006 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.28:5353: connect: connection refused" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.711715 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.876130 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-nb\") pod \"466b49df-9dc2-40a9-ba14-24b6915979f1\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.876373 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-dns-svc\") pod \"466b49df-9dc2-40a9-ba14-24b6915979f1\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.876531 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-config\") pod \"466b49df-9dc2-40a9-ba14-24b6915979f1\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.876657 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-sb\") pod \"466b49df-9dc2-40a9-ba14-24b6915979f1\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.876715 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p96s7\" (UniqueName: \"kubernetes.io/projected/466b49df-9dc2-40a9-ba14-24b6915979f1-kube-api-access-p96s7\") pod \"466b49df-9dc2-40a9-ba14-24b6915979f1\" (UID: \"466b49df-9dc2-40a9-ba14-24b6915979f1\") " Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.897371 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/466b49df-9dc2-40a9-ba14-24b6915979f1-kube-api-access-p96s7" (OuterVolumeSpecName: "kube-api-access-p96s7") pod "466b49df-9dc2-40a9-ba14-24b6915979f1" (UID: "466b49df-9dc2-40a9-ba14-24b6915979f1"). InnerVolumeSpecName "kube-api-access-p96s7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.938002 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "466b49df-9dc2-40a9-ba14-24b6915979f1" (UID: "466b49df-9dc2-40a9-ba14-24b6915979f1"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.938317 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "466b49df-9dc2-40a9-ba14-24b6915979f1" (UID: "466b49df-9dc2-40a9-ba14-24b6915979f1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.938482 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-config" (OuterVolumeSpecName: "config") pod "466b49df-9dc2-40a9-ba14-24b6915979f1" (UID: "466b49df-9dc2-40a9-ba14-24b6915979f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.939707 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "466b49df-9dc2-40a9-ba14-24b6915979f1" (UID: "466b49df-9dc2-40a9-ba14-24b6915979f1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.978454 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.978711 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.978790 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p96s7\" (UniqueName: \"kubernetes.io/projected/466b49df-9dc2-40a9-ba14-24b6915979f1-kube-api-access-p96s7\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.978859 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:12 crc kubenswrapper[4852]: I0129 12:13:12.978915 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/466b49df-9dc2-40a9-ba14-24b6915979f1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.226908 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" event={"ID":"466b49df-9dc2-40a9-ba14-24b6915979f1","Type":"ContainerDied","Data":"58adc6303b8c4a2a668f62e200105180d720cda7f06b4e85883e859185e4f030"} Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.226956 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f5f9749f7-dhqbv" Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.227316 4852 scope.go:117] "RemoveContainer" containerID="0e39eeb2e40d22ab3f537a6b4e07ef101b7a7bc667cf046527da36f94b14f278" Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.254919 4852 scope.go:117] "RemoveContainer" containerID="ebdfe8c19394619c2f974a6d561653aeabfb6290bbdf479fbac2ab9b7560771a" Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.273871 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f5f9749f7-dhqbv"] Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.282772 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f5f9749f7-dhqbv"] Jan 29 12:13:13 crc kubenswrapper[4852]: I0129 12:13:13.490968 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" path="/var/lib/kubelet/pods/466b49df-9dc2-40a9-ba14-24b6915979f1/volumes" Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.016869 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.017744 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.017833 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.019036 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"39ac92376ac0767fe7ffa13bcbc31ca20190bc032b2006a4dbc47c525a07d124"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.019148 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://39ac92376ac0767fe7ffa13bcbc31ca20190bc032b2006a4dbc47c525a07d124" gracePeriod=600 Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.401992 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="39ac92376ac0767fe7ffa13bcbc31ca20190bc032b2006a4dbc47c525a07d124" exitCode=0 Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.402107 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"39ac92376ac0767fe7ffa13bcbc31ca20190bc032b2006a4dbc47c525a07d124"} Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.402352 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a"} Jan 29 12:13:30 crc kubenswrapper[4852]: I0129 12:13:30.402376 4852 scope.go:117] "RemoveContainer" containerID="a03a4eef4bdf597bdd4f69132a2af6bcffa1e0c925d14aab6e13b9df5f231a87" Jan 29 12:13:31 crc kubenswrapper[4852]: I0129 12:13:31.687144 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-65cc64b9b9-ppj6r" Jan 29 12:13:37 crc kubenswrapper[4852]: E0129 12:13:37.382772 4852 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.23:49194->38.102.83.23:36165: write tcp 38.102.83.23:49194->38.102.83.23:36165: write: broken pipe Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.810298 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-h4dh6"] Jan 29 12:13:38 crc kubenswrapper[4852]: E0129 12:13:38.810673 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="init" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.810686 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="init" Jan 29 12:13:38 crc kubenswrapper[4852]: E0129 12:13:38.810720 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="dnsmasq-dns" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.810726 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="dnsmasq-dns" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.810871 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="466b49df-9dc2-40a9-ba14-24b6915979f1" containerName="dnsmasq-dns" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.811471 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.822915 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h4dh6"] Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.884333 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szhtl\" (UniqueName: \"kubernetes.io/projected/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-kube-api-access-szhtl\") pod \"glance-db-create-h4dh6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.884436 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-operator-scripts\") pod \"glance-db-create-h4dh6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.915632 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-1259-account-create-update-5d7vf"] Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.916873 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.920281 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.928870 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1259-account-create-update-5d7vf"] Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.985678 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szhtl\" (UniqueName: \"kubernetes.io/projected/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-kube-api-access-szhtl\") pod \"glance-db-create-h4dh6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.985784 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-operator-scripts\") pod \"glance-db-create-h4dh6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:38 crc kubenswrapper[4852]: I0129 12:13:38.986612 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-operator-scripts\") pod \"glance-db-create-h4dh6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.015789 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szhtl\" (UniqueName: \"kubernetes.io/projected/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-kube-api-access-szhtl\") pod \"glance-db-create-h4dh6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.087730 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxwqz\" (UniqueName: \"kubernetes.io/projected/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-kube-api-access-nxwqz\") pod \"glance-1259-account-create-update-5d7vf\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.087800 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-operator-scripts\") pod \"glance-1259-account-create-update-5d7vf\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.133719 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.197449 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxwqz\" (UniqueName: \"kubernetes.io/projected/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-kube-api-access-nxwqz\") pod \"glance-1259-account-create-update-5d7vf\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.197511 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-operator-scripts\") pod \"glance-1259-account-create-update-5d7vf\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.202993 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-operator-scripts\") pod \"glance-1259-account-create-update-5d7vf\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.217169 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxwqz\" (UniqueName: \"kubernetes.io/projected/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-kube-api-access-nxwqz\") pod \"glance-1259-account-create-update-5d7vf\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.236286 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.604097 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h4dh6"] Jan 29 12:13:39 crc kubenswrapper[4852]: I0129 12:13:39.735565 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1259-account-create-update-5d7vf"] Jan 29 12:13:39 crc kubenswrapper[4852]: W0129 12:13:39.736422 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd14a7f30_2ff8_4ea0_b52d_f345571b2d79.slice/crio-ec0417358963e74f14fff85417b6309136a65e5ba08fffe2a627a024b31d2761 WatchSource:0}: Error finding container ec0417358963e74f14fff85417b6309136a65e5ba08fffe2a627a024b31d2761: Status 404 returned error can't find the container with id ec0417358963e74f14fff85417b6309136a65e5ba08fffe2a627a024b31d2761 Jan 29 12:13:40 crc kubenswrapper[4852]: I0129 12:13:40.504611 4852 generic.go:334] "Generic (PLEG): container finished" podID="bd267b59-5fce-4fe4-850f-d86ca19bc1e6" containerID="1364c4d996e4acc7b384a9241c5a041f99c2d44d11d9db2d301449127609f632" exitCode=0 Jan 29 12:13:40 crc kubenswrapper[4852]: I0129 12:13:40.504695 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h4dh6" event={"ID":"bd267b59-5fce-4fe4-850f-d86ca19bc1e6","Type":"ContainerDied","Data":"1364c4d996e4acc7b384a9241c5a041f99c2d44d11d9db2d301449127609f632"} Jan 29 12:13:40 crc kubenswrapper[4852]: I0129 12:13:40.504993 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h4dh6" event={"ID":"bd267b59-5fce-4fe4-850f-d86ca19bc1e6","Type":"ContainerStarted","Data":"76b07b89222a295da2c551224c4a8dea7a03abc08d7b52969a0447d990b9991a"} Jan 29 12:13:40 crc kubenswrapper[4852]: I0129 12:13:40.506913 4852 generic.go:334] "Generic (PLEG): container finished" podID="d14a7f30-2ff8-4ea0-b52d-f345571b2d79" containerID="714fe3408bbf0103f8209f6aabe1724085938e274b10943b38023ae629939271" exitCode=0 Jan 29 12:13:40 crc kubenswrapper[4852]: I0129 12:13:40.506957 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1259-account-create-update-5d7vf" event={"ID":"d14a7f30-2ff8-4ea0-b52d-f345571b2d79","Type":"ContainerDied","Data":"714fe3408bbf0103f8209f6aabe1724085938e274b10943b38023ae629939271"} Jan 29 12:13:40 crc kubenswrapper[4852]: I0129 12:13:40.506981 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1259-account-create-update-5d7vf" event={"ID":"d14a7f30-2ff8-4ea0-b52d-f345571b2d79","Type":"ContainerStarted","Data":"ec0417358963e74f14fff85417b6309136a65e5ba08fffe2a627a024b31d2761"} Jan 29 12:13:41 crc kubenswrapper[4852]: I0129 12:13:41.921767 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:41 crc kubenswrapper[4852]: I0129 12:13:41.933303 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.048687 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szhtl\" (UniqueName: \"kubernetes.io/projected/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-kube-api-access-szhtl\") pod \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.048801 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-operator-scripts\") pod \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\" (UID: \"bd267b59-5fce-4fe4-850f-d86ca19bc1e6\") " Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.048893 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-operator-scripts\") pod \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.048973 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxwqz\" (UniqueName: \"kubernetes.io/projected/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-kube-api-access-nxwqz\") pod \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\" (UID: \"d14a7f30-2ff8-4ea0-b52d-f345571b2d79\") " Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.049786 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d14a7f30-2ff8-4ea0-b52d-f345571b2d79" (UID: "d14a7f30-2ff8-4ea0-b52d-f345571b2d79"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.050183 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd267b59-5fce-4fe4-850f-d86ca19bc1e6" (UID: "bd267b59-5fce-4fe4-850f-d86ca19bc1e6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.055770 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-kube-api-access-szhtl" (OuterVolumeSpecName: "kube-api-access-szhtl") pod "bd267b59-5fce-4fe4-850f-d86ca19bc1e6" (UID: "bd267b59-5fce-4fe4-850f-d86ca19bc1e6"). InnerVolumeSpecName "kube-api-access-szhtl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.057106 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-kube-api-access-nxwqz" (OuterVolumeSpecName: "kube-api-access-nxwqz") pod "d14a7f30-2ff8-4ea0-b52d-f345571b2d79" (UID: "d14a7f30-2ff8-4ea0-b52d-f345571b2d79"). InnerVolumeSpecName "kube-api-access-nxwqz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.151687 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.151731 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxwqz\" (UniqueName: \"kubernetes.io/projected/d14a7f30-2ff8-4ea0-b52d-f345571b2d79-kube-api-access-nxwqz\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.151746 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szhtl\" (UniqueName: \"kubernetes.io/projected/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-kube-api-access-szhtl\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.151760 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd267b59-5fce-4fe4-850f-d86ca19bc1e6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.540130 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1259-account-create-update-5d7vf" event={"ID":"d14a7f30-2ff8-4ea0-b52d-f345571b2d79","Type":"ContainerDied","Data":"ec0417358963e74f14fff85417b6309136a65e5ba08fffe2a627a024b31d2761"} Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.540176 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1259-account-create-update-5d7vf" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.540192 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec0417358963e74f14fff85417b6309136a65e5ba08fffe2a627a024b31d2761" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.543521 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h4dh6" event={"ID":"bd267b59-5fce-4fe4-850f-d86ca19bc1e6","Type":"ContainerDied","Data":"76b07b89222a295da2c551224c4a8dea7a03abc08d7b52969a0447d990b9991a"} Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.543555 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76b07b89222a295da2c551224c4a8dea7a03abc08d7b52969a0447d990b9991a" Jan 29 12:13:42 crc kubenswrapper[4852]: I0129 12:13:42.543657 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-h4dh6" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.164025 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-mtgj8"] Jan 29 12:13:44 crc kubenswrapper[4852]: E0129 12:13:44.164695 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d14a7f30-2ff8-4ea0-b52d-f345571b2d79" containerName="mariadb-account-create-update" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.164710 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d14a7f30-2ff8-4ea0-b52d-f345571b2d79" containerName="mariadb-account-create-update" Jan 29 12:13:44 crc kubenswrapper[4852]: E0129 12:13:44.164727 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd267b59-5fce-4fe4-850f-d86ca19bc1e6" containerName="mariadb-database-create" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.164732 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd267b59-5fce-4fe4-850f-d86ca19bc1e6" containerName="mariadb-database-create" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.164876 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd267b59-5fce-4fe4-850f-d86ca19bc1e6" containerName="mariadb-database-create" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.164891 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d14a7f30-2ff8-4ea0-b52d-f345571b2d79" containerName="mariadb-account-create-update" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.165482 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.168513 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-88bfk" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.169260 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.193732 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-mtgj8"] Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.289785 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csb57\" (UniqueName: \"kubernetes.io/projected/54b7c49e-9d11-4519-8721-9838904b77db-kube-api-access-csb57\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.289847 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-db-sync-config-data\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.290105 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-config-data\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.290176 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-combined-ca-bundle\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.391661 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csb57\" (UniqueName: \"kubernetes.io/projected/54b7c49e-9d11-4519-8721-9838904b77db-kube-api-access-csb57\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.391705 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-db-sync-config-data\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.391772 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-config-data\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.391798 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-combined-ca-bundle\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.397275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-combined-ca-bundle\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.408229 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-db-sync-config-data\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.408447 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-config-data\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.415278 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csb57\" (UniqueName: \"kubernetes.io/projected/54b7c49e-9d11-4519-8721-9838904b77db-kube-api-access-csb57\") pod \"glance-db-sync-mtgj8\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:44 crc kubenswrapper[4852]: I0129 12:13:44.483630 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:45 crc kubenswrapper[4852]: I0129 12:13:45.000690 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-mtgj8"] Jan 29 12:13:45 crc kubenswrapper[4852]: W0129 12:13:45.004252 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54b7c49e_9d11_4519_8721_9838904b77db.slice/crio-a4ae2910f8be0754b1a2a8d11e82361faaa97ffd00d711fcc31ecfabe32515d5 WatchSource:0}: Error finding container a4ae2910f8be0754b1a2a8d11e82361faaa97ffd00d711fcc31ecfabe32515d5: Status 404 returned error can't find the container with id a4ae2910f8be0754b1a2a8d11e82361faaa97ffd00d711fcc31ecfabe32515d5 Jan 29 12:13:45 crc kubenswrapper[4852]: I0129 12:13:45.571856 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mtgj8" event={"ID":"54b7c49e-9d11-4519-8721-9838904b77db","Type":"ContainerStarted","Data":"a4ae2910f8be0754b1a2a8d11e82361faaa97ffd00d711fcc31ecfabe32515d5"} Jan 29 12:13:46 crc kubenswrapper[4852]: I0129 12:13:46.580421 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mtgj8" event={"ID":"54b7c49e-9d11-4519-8721-9838904b77db","Type":"ContainerStarted","Data":"03168d6d9f0d6ba525ce6668927c5a310220c0f48476b640626c13fbcc2080ae"} Jan 29 12:13:46 crc kubenswrapper[4852]: I0129 12:13:46.602512 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-mtgj8" podStartSLOduration=2.602492269 podStartE2EDuration="2.602492269s" podCreationTimestamp="2026-01-29 12:13:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:13:46.59555624 +0000 UTC m=+5523.812887384" watchObservedRunningTime="2026-01-29 12:13:46.602492269 +0000 UTC m=+5523.819823403" Jan 29 12:13:49 crc kubenswrapper[4852]: I0129 12:13:49.611947 4852 generic.go:334] "Generic (PLEG): container finished" podID="54b7c49e-9d11-4519-8721-9838904b77db" containerID="03168d6d9f0d6ba525ce6668927c5a310220c0f48476b640626c13fbcc2080ae" exitCode=0 Jan 29 12:13:49 crc kubenswrapper[4852]: I0129 12:13:49.612046 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mtgj8" event={"ID":"54b7c49e-9d11-4519-8721-9838904b77db","Type":"ContainerDied","Data":"03168d6d9f0d6ba525ce6668927c5a310220c0f48476b640626c13fbcc2080ae"} Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.706216 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nr88d"] Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.708396 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.719753 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-catalog-content\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.719808 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-utilities\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.719830 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z22xs\" (UniqueName: \"kubernetes.io/projected/ad8213a7-efd7-4e84-86b4-c5af02319009-kube-api-access-z22xs\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.747646 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nr88d"] Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.821613 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-catalog-content\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.821706 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-utilities\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.821738 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z22xs\" (UniqueName: \"kubernetes.io/projected/ad8213a7-efd7-4e84-86b4-c5af02319009-kube-api-access-z22xs\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.822221 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-utilities\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.822430 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-catalog-content\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:50 crc kubenswrapper[4852]: I0129 12:13:50.844880 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z22xs\" (UniqueName: \"kubernetes.io/projected/ad8213a7-efd7-4e84-86b4-c5af02319009-kube-api-access-z22xs\") pod \"community-operators-nr88d\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.038937 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.045899 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.126134 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-combined-ca-bundle\") pod \"54b7c49e-9d11-4519-8721-9838904b77db\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.126180 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-db-sync-config-data\") pod \"54b7c49e-9d11-4519-8721-9838904b77db\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.126227 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csb57\" (UniqueName: \"kubernetes.io/projected/54b7c49e-9d11-4519-8721-9838904b77db-kube-api-access-csb57\") pod \"54b7c49e-9d11-4519-8721-9838904b77db\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.126282 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-config-data\") pod \"54b7c49e-9d11-4519-8721-9838904b77db\" (UID: \"54b7c49e-9d11-4519-8721-9838904b77db\") " Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.136088 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54b7c49e-9d11-4519-8721-9838904b77db-kube-api-access-csb57" (OuterVolumeSpecName: "kube-api-access-csb57") pod "54b7c49e-9d11-4519-8721-9838904b77db" (UID: "54b7c49e-9d11-4519-8721-9838904b77db"). InnerVolumeSpecName "kube-api-access-csb57". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.143142 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "54b7c49e-9d11-4519-8721-9838904b77db" (UID: "54b7c49e-9d11-4519-8721-9838904b77db"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.159239 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54b7c49e-9d11-4519-8721-9838904b77db" (UID: "54b7c49e-9d11-4519-8721-9838904b77db"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.194480 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-config-data" (OuterVolumeSpecName: "config-data") pod "54b7c49e-9d11-4519-8721-9838904b77db" (UID: "54b7c49e-9d11-4519-8721-9838904b77db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.227601 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.227636 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.227648 4852 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b7c49e-9d11-4519-8721-9838904b77db-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.229263 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csb57\" (UniqueName: \"kubernetes.io/projected/54b7c49e-9d11-4519-8721-9838904b77db-kube-api-access-csb57\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.539031 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nr88d"] Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.628775 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerStarted","Data":"61e635b3b97e6c9ff07de37d05b82e620a8a5379bb69fbc7f0c0947357414efe"} Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.630149 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mtgj8" event={"ID":"54b7c49e-9d11-4519-8721-9838904b77db","Type":"ContainerDied","Data":"a4ae2910f8be0754b1a2a8d11e82361faaa97ffd00d711fcc31ecfabe32515d5"} Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.630179 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4ae2910f8be0754b1a2a8d11e82361faaa97ffd00d711fcc31ecfabe32515d5" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.630337 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mtgj8" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.711517 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cjwlq"] Jan 29 12:13:51 crc kubenswrapper[4852]: E0129 12:13:51.711950 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b7c49e-9d11-4519-8721-9838904b77db" containerName="glance-db-sync" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.711964 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b7c49e-9d11-4519-8721-9838904b77db" containerName="glance-db-sync" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.712135 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b7c49e-9d11-4519-8721-9838904b77db" containerName="glance-db-sync" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.713322 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.721700 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cjwlq"] Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.849818 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-catalog-content\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.850171 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-utilities\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.850297 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n8gr\" (UniqueName: \"kubernetes.io/projected/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-kube-api-access-9n8gr\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.952071 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n8gr\" (UniqueName: \"kubernetes.io/projected/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-kube-api-access-9n8gr\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.952217 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-catalog-content\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.952243 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-utilities\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " 
pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.952880 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-utilities\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.953456 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-catalog-content\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:51 crc kubenswrapper[4852]: I0129 12:13:51.978727 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n8gr\" (UniqueName: \"kubernetes.io/projected/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-kube-api-access-9n8gr\") pod \"redhat-operators-cjwlq\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.041728 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-ff8fb85f5-ztczg"] Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.043498 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.067671 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.069085 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.078361 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.078745 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.078940 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-88bfk" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.078944 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.097784 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-ff8fb85f5-ztczg"] Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.108637 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.132454 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.155099 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs92r\" (UniqueName: \"kubernetes.io/projected/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-kube-api-access-qs92r\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.155150 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-dns-svc\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.155170 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-sb\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.155258 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-config\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.155282 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-nb\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.182686 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.184735 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.187815 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.194409 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.258979 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-scripts\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.259208 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.259292 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qs92r\" (UniqueName: \"kubernetes.io/projected/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-kube-api-access-qs92r\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.259417 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-dns-svc\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.259766 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-sb\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.259875 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.259943 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z7k2\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-kube-api-access-7z7k2\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.260215 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-config-data\") pod \"glance-default-external-api-0\" (UID: 
\"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.260279 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-config\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.260350 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-nb\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.260434 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-ceph\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.260464 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-logs\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.260707 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-dns-svc\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.261258 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-nb\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.261723 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-sb\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.261846 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-config\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.277551 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs92r\" (UniqueName: \"kubernetes.io/projected/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-kube-api-access-qs92r\") pod \"dnsmasq-dns-ff8fb85f5-ztczg\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 
12:13:52.361689 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361758 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361789 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-logs\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361817 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361840 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361868 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z7k2\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-kube-api-access-7z7k2\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361919 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjjq8\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-kube-api-access-tjjq8\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361948 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.361984 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-config-data\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 
12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.362024 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-ceph\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.362050 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-ceph\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.362075 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-logs\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.362124 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-scripts\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.362157 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.363301 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.364230 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-logs\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.369884 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-config-data\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.369941 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.370617 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.371017 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-ceph\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.372573 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-scripts\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.389700 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z7k2\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-kube-api-access-7z7k2\") pod \"glance-default-external-api-0\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.397150 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465432 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-ceph\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465530 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465573 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-logs\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465608 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465630 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465678 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjjq8\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-kube-api-access-tjjq8\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.465707 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.466069 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.466921 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-logs\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.475187 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.475879 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.476002 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.476392 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-ceph\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.489219 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjjq8\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-kube-api-access-tjjq8\") pod \"glance-default-internal-api-0\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc 
kubenswrapper[4852]: I0129 12:13:52.563550 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.683300 4852 generic.go:334] "Generic (PLEG): container finished" podID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerID="6d437c18a5e5f2c9c905fed30d82c604f1c2e5653edf35ff63e435dc68e76620" exitCode=0 Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.683692 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerDied","Data":"6d437c18a5e5f2c9c905fed30d82c604f1c2e5653edf35ff63e435dc68e76620"} Jan 29 12:13:52 crc kubenswrapper[4852]: I0129 12:13:52.750008 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cjwlq"] Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.040155 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-ff8fb85f5-ztczg"] Jan 29 12:13:53 crc kubenswrapper[4852]: W0129 12:13:53.056956 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9f1f324_73df_4de1_b3de_2e5b9e8ac4cd.slice/crio-32d18db29b0be7ea9ca5fc25b74e95a6cb2f1591ecb22e0c5713805d876f386e WatchSource:0}: Error finding container 32d18db29b0be7ea9ca5fc25b74e95a6cb2f1591ecb22e0c5713805d876f386e: Status 404 returned error can't find the container with id 32d18db29b0be7ea9ca5fc25b74e95a6cb2f1591ecb22e0c5713805d876f386e Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.062103 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:53 crc kubenswrapper[4852]: W0129 12:13:53.083364 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57eeed06_46d9_40a6_ad69_0667f362561d.slice/crio-251a0229353960b8e33175976ff59cf67cfe5767a323c4fa1a63215062f0a63a WatchSource:0}: Error finding container 251a0229353960b8e33175976ff59cf67cfe5767a323c4fa1a63215062f0a63a: Status 404 returned error can't find the container with id 251a0229353960b8e33175976ff59cf67cfe5767a323c4fa1a63215062f0a63a Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.278205 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:53 crc kubenswrapper[4852]: E0129 12:13:53.573954 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f6de4b9_d582_46ec_8c45_3df23b9ad76f.slice/crio-72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f6de4b9_d582_46ec_8c45_3df23b9ad76f.slice/crio-conmon-72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.617364 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.725509 4852 generic.go:334] "Generic (PLEG): container finished" podID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" 
containerID="c15b10e98fde22342b6630391d8e74a7f2d1c30a4e47629ec715891a14be63f9" exitCode=0 Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.725628 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" event={"ID":"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd","Type":"ContainerDied","Data":"c15b10e98fde22342b6630391d8e74a7f2d1c30a4e47629ec715891a14be63f9"} Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.725663 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" event={"ID":"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd","Type":"ContainerStarted","Data":"32d18db29b0be7ea9ca5fc25b74e95a6cb2f1591ecb22e0c5713805d876f386e"} Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.731057 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57eeed06-46d9-40a6-ad69-0667f362561d","Type":"ContainerStarted","Data":"251a0229353960b8e33175976ff59cf67cfe5767a323c4fa1a63215062f0a63a"} Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.754170 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerID="72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150" exitCode=0 Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.755413 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerDied","Data":"72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150"} Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.755464 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerStarted","Data":"e99b2c4f1845716bffe79f97c022645831f638022d8093bb6e053f0f4db66426"} Jan 29 12:13:53 crc kubenswrapper[4852]: I0129 12:13:53.777465 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ba66f16d-9993-4cf2-9e9b-29dc12f2f915","Type":"ContainerStarted","Data":"2c0ea8c884bd6c9a80a3329c71eb4f126cf3d57db226dcb76e22755019ee69da"} Jan 29 12:13:54 crc kubenswrapper[4852]: I0129 12:13:54.787939 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ba66f16d-9993-4cf2-9e9b-29dc12f2f915","Type":"ContainerStarted","Data":"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0"} Jan 29 12:13:54 crc kubenswrapper[4852]: I0129 12:13:54.790357 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerStarted","Data":"08ccb93f46422dd1f933ec7153a7e845e956a52ebee034d78c90b916cff909b6"} Jan 29 12:13:54 crc kubenswrapper[4852]: I0129 12:13:54.794691 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" event={"ID":"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd","Type":"ContainerStarted","Data":"82ee2f082f25cf881e48c0dc5a07227b1115d3228ec25ed656eb037a912baa5e"} Jan 29 12:13:54 crc kubenswrapper[4852]: I0129 12:13:54.796476 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57eeed06-46d9-40a6-ad69-0667f362561d","Type":"ContainerStarted","Data":"c2ffa56cd139f004c7d439472272dd3023bf48752cbb1e5ed6fdb3bea0bf1ac7"} Jan 29 12:13:54 crc kubenswrapper[4852]: 
I0129 12:13:54.831846 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" podStartSLOduration=2.8318278169999997 podStartE2EDuration="2.831827817s" podCreationTimestamp="2026-01-29 12:13:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:13:54.827524032 +0000 UTC m=+5532.044855166" watchObservedRunningTime="2026-01-29 12:13:54.831827817 +0000 UTC m=+5532.049158951" Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.795339 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.813166 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ba66f16d-9993-4cf2-9e9b-29dc12f2f915","Type":"ContainerStarted","Data":"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df"} Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.814898 4852 generic.go:334] "Generic (PLEG): container finished" podID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerID="08ccb93f46422dd1f933ec7153a7e845e956a52ebee034d78c90b916cff909b6" exitCode=0 Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.814948 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerDied","Data":"08ccb93f46422dd1f933ec7153a7e845e956a52ebee034d78c90b916cff909b6"} Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.816684 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57eeed06-46d9-40a6-ad69-0667f362561d","Type":"ContainerStarted","Data":"c2d4168f92fb9a40ea344b5b066a6ffc3503f172fdd526eab232e7b3d5939adb"} Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.816800 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-log" containerID="cri-o://c2ffa56cd139f004c7d439472272dd3023bf48752cbb1e5ed6fdb3bea0bf1ac7" gracePeriod=30 Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.816845 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-httpd" containerID="cri-o://c2d4168f92fb9a40ea344b5b066a6ffc3503f172fdd526eab232e7b3d5939adb" gracePeriod=30 Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.823260 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerStarted","Data":"9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f"} Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.823397 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:13:55 crc kubenswrapper[4852]: I0129 12:13:55.840571 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.840554374 podStartE2EDuration="3.840554374s" podCreationTimestamp="2026-01-29 12:13:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-29 12:13:55.833067762 +0000 UTC m=+5533.050398896" watchObservedRunningTime="2026-01-29 12:13:55.840554374 +0000 UTC m=+5533.057885508" Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.835487 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerID="9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f" exitCode=0 Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.836222 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerDied","Data":"9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f"} Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.845527 4852 generic.go:334] "Generic (PLEG): container finished" podID="57eeed06-46d9-40a6-ad69-0667f362561d" containerID="c2d4168f92fb9a40ea344b5b066a6ffc3503f172fdd526eab232e7b3d5939adb" exitCode=0 Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.845568 4852 generic.go:334] "Generic (PLEG): container finished" podID="57eeed06-46d9-40a6-ad69-0667f362561d" containerID="c2ffa56cd139f004c7d439472272dd3023bf48752cbb1e5ed6fdb3bea0bf1ac7" exitCode=143 Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.845670 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57eeed06-46d9-40a6-ad69-0667f362561d","Type":"ContainerDied","Data":"c2d4168f92fb9a40ea344b5b066a6ffc3503f172fdd526eab232e7b3d5939adb"} Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.845705 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57eeed06-46d9-40a6-ad69-0667f362561d","Type":"ContainerDied","Data":"c2ffa56cd139f004c7d439472272dd3023bf48752cbb1e5ed6fdb3bea0bf1ac7"} Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.846033 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-log" containerID="cri-o://653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0" gracePeriod=30 Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.846092 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-httpd" containerID="cri-o://9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df" gracePeriod=30 Jan 29 12:13:56 crc kubenswrapper[4852]: I0129 12:13:56.864555 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.864534714 podStartE2EDuration="4.864534714s" podCreationTimestamp="2026-01-29 12:13:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:13:55.897032471 +0000 UTC m=+5533.114363615" watchObservedRunningTime="2026-01-29 12:13:56.864534714 +0000 UTC m=+5534.081865848" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.027874 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.175789 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z7k2\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-kube-api-access-7z7k2\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.175849 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-scripts\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.175883 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-logs\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.175951 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-httpd-run\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176059 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-ceph\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176137 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-combined-ca-bundle\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176233 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-config-data\") pod \"57eeed06-46d9-40a6-ad69-0667f362561d\" (UID: \"57eeed06-46d9-40a6-ad69-0667f362561d\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176315 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-logs" (OuterVolumeSpecName: "logs") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176357 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176752 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.176779 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57eeed06-46d9-40a6-ad69-0667f362561d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.182241 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-scripts" (OuterVolumeSpecName: "scripts") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.187853 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-kube-api-access-7z7k2" (OuterVolumeSpecName: "kube-api-access-7z7k2") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "kube-api-access-7z7k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.196430 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-ceph" (OuterVolumeSpecName: "ceph") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.208028 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.227042 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-config-data" (OuterVolumeSpecName: "config-data") pod "57eeed06-46d9-40a6-ad69-0667f362561d" (UID: "57eeed06-46d9-40a6-ad69-0667f362561d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.281018 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.281058 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.281070 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.281083 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z7k2\" (UniqueName: \"kubernetes.io/projected/57eeed06-46d9-40a6-ad69-0667f362561d-kube-api-access-7z7k2\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.281093 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57eeed06-46d9-40a6-ad69-0667f362561d-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.410614 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.585695 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-scripts\") pod \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.585769 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-config-data\") pod \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.585871 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-combined-ca-bundle\") pod \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.585974 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjjq8\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-kube-api-access-tjjq8\") pod \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586027 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-ceph\") pod \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586083 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-httpd-run\") pod 
\"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586225 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-logs\") pod \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\" (UID: \"ba66f16d-9993-4cf2-9e9b-29dc12f2f915\") " Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586621 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586641 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-logs" (OuterVolumeSpecName: "logs") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586885 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.586898 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.589892 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-ceph" (OuterVolumeSpecName: "ceph") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.589941 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-kube-api-access-tjjq8" (OuterVolumeSpecName: "kube-api-access-tjjq8") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "kube-api-access-tjjq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.590486 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-scripts" (OuterVolumeSpecName: "scripts") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.621673 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.662468 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-config-data" (OuterVolumeSpecName: "config-data") pod "ba66f16d-9993-4cf2-9e9b-29dc12f2f915" (UID: "ba66f16d-9993-4cf2-9e9b-29dc12f2f915"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.688271 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.688313 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjjq8\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-kube-api-access-tjjq8\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.688332 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.688345 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.688355 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba66f16d-9993-4cf2-9e9b-29dc12f2f915-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.859267 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57eeed06-46d9-40a6-ad69-0667f362561d","Type":"ContainerDied","Data":"251a0229353960b8e33175976ff59cf67cfe5767a323c4fa1a63215062f0a63a"} Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.859715 4852 scope.go:117] "RemoveContainer" containerID="c2d4168f92fb9a40ea344b5b066a6ffc3503f172fdd526eab232e7b3d5939adb" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.859317 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.862547 4852 generic.go:334] "Generic (PLEG): container finished" podID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerID="9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df" exitCode=0 Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.862603 4852 generic.go:334] "Generic (PLEG): container finished" podID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerID="653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0" exitCode=143 Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.862678 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.862672 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ba66f16d-9993-4cf2-9e9b-29dc12f2f915","Type":"ContainerDied","Data":"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df"} Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.862841 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ba66f16d-9993-4cf2-9e9b-29dc12f2f915","Type":"ContainerDied","Data":"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0"} Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.862862 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ba66f16d-9993-4cf2-9e9b-29dc12f2f915","Type":"ContainerDied","Data":"2c0ea8c884bd6c9a80a3329c71eb4f126cf3d57db226dcb76e22755019ee69da"} Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.872211 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerStarted","Data":"7406687986df77d97663e69308a0839e3129d7d59077813d3bb269979a882512"} Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.890352 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.901848 4852 scope.go:117] "RemoveContainer" containerID="c2ffa56cd139f004c7d439472272dd3023bf48752cbb1e5ed6fdb3bea0bf1ac7" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.914206 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.914746 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nr88d" podStartSLOduration=3.953286282 podStartE2EDuration="7.914724774s" podCreationTimestamp="2026-01-29 12:13:50 +0000 UTC" firstStartedPulling="2026-01-29 12:13:52.690877268 +0000 UTC m=+5529.908208402" lastFinishedPulling="2026-01-29 12:13:56.65231576 +0000 UTC m=+5533.869646894" observedRunningTime="2026-01-29 12:13:57.903516051 +0000 UTC m=+5535.120847195" watchObservedRunningTime="2026-01-29 12:13:57.914724774 +0000 UTC m=+5535.132055908" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.937600 4852 scope.go:117] "RemoveContainer" containerID="9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.957335 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:57 crc kubenswrapper[4852]: E0129 12:13:57.957986 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-log" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958006 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-log" Jan 29 12:13:57 crc kubenswrapper[4852]: E0129 12:13:57.958039 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-log" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958047 4852 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-log" Jan 29 12:13:57 crc kubenswrapper[4852]: E0129 12:13:57.958068 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-httpd" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958077 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-httpd" Jan 29 12:13:57 crc kubenswrapper[4852]: E0129 12:13:57.958095 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-httpd" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958103 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-httpd" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958312 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-log" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958344 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-httpd" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958366 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" containerName="glance-httpd" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.958377 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" containerName="glance-log" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.959621 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.963863 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.964242 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-88bfk" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.964395 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.968480 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.976292 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.976498 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.990150 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:57 crc kubenswrapper[4852]: I0129 12:13:57.999493 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.000986 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.003900 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.035063 4852 scope.go:117] "RemoveContainer" containerID="653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.040367 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.065391 4852 scope.go:117] "RemoveContainer" containerID="9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df" Jan 29 12:13:58 crc kubenswrapper[4852]: E0129 12:13:58.066073 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df\": container with ID starting with 9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df not found: ID does not exist" containerID="9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.066117 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df"} err="failed to get container status \"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df\": rpc error: code = NotFound desc = could not find container \"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df\": container with ID starting with 9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df not found: ID does not exist" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.066149 4852 scope.go:117] "RemoveContainer" containerID="653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0" Jan 29 12:13:58 crc kubenswrapper[4852]: E0129 12:13:58.066527 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0\": container with ID starting with 653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0 not found: ID does not exist" containerID="653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.066564 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0"} err="failed to get container status \"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0\": rpc error: code = NotFound desc = could not find container \"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0\": container with ID starting with 653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0 not found: ID does not exist" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.066578 4852 scope.go:117] "RemoveContainer" containerID="9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.068719 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df"} err="failed to get container status 
\"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df\": rpc error: code = NotFound desc = could not find container \"9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df\": container with ID starting with 9c6ae441d4d91637a485073603bce1178f92667b21741ec1aa261c989b0591df not found: ID does not exist" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.068803 4852 scope.go:117] "RemoveContainer" containerID="653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.070148 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0"} err="failed to get container status \"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0\": rpc error: code = NotFound desc = could not find container \"653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0\": container with ID starting with 653f8eb229161e8b4bf918b56d8b9d5ef762dfe6490d642e9e508def4b4c53b0 not found: ID does not exist" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.104872 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.104937 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-ceph\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.104994 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-logs\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.105024 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-scripts\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.105050 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-config-data\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.105068 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tgl2\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-kube-api-access-2tgl2\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: 
I0129 12:13:58.105115 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207282 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-ceph\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207376 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-ceph\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207436 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207462 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207479 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207502 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-logs\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-scripts\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207556 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-logs\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207572 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-config-data\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207605 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tgl2\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-kube-api-access-2tgl2\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207623 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207640 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcqnt\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-kube-api-access-xcqnt\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207682 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.207709 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.208525 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-logs\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.209075 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.212450 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-config-data\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.212888 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-scripts\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.219912 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.219963 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-ceph\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.242621 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tgl2\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-kube-api-access-2tgl2\") pod \"glance-default-external-api-0\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.309253 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-ceph\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.309353 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.309964 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.310246 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.310295 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.310360 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.310851 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-logs\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.310910 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.310939 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcqnt\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-kube-api-access-xcqnt\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.315079 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.315569 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.317233 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.318430 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-ceph\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.332309 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcqnt\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-kube-api-access-xcqnt\") pod \"glance-default-internal-api-0\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.347009 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.380989 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.887816 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerStarted","Data":"45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579"} Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.911941 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cjwlq" podStartSLOduration=3.9973409220000002 podStartE2EDuration="7.911919952s" podCreationTimestamp="2026-01-29 12:13:51 +0000 UTC" firstStartedPulling="2026-01-29 12:13:53.913451961 +0000 UTC m=+5531.130783095" lastFinishedPulling="2026-01-29 12:13:57.828030981 +0000 UTC m=+5535.045362125" observedRunningTime="2026-01-29 12:13:58.905538046 +0000 UTC m=+5536.122869190" watchObservedRunningTime="2026-01-29 12:13:58.911919952 +0000 UTC m=+5536.129251086" Jan 29 12:13:58 crc kubenswrapper[4852]: I0129 12:13:58.986524 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:13:58 crc kubenswrapper[4852]: W0129 12:13:58.994429 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4c7fc2f_94a0_4558_9e2b_f3578f70f273.slice/crio-a2000d6e739e7b513717deb7cdaf2a6cd5f5ea7d8f8b5721a87612c15b78ff44 WatchSource:0}: Error finding container a2000d6e739e7b513717deb7cdaf2a6cd5f5ea7d8f8b5721a87612c15b78ff44: Status 404 returned error can't find the container with id a2000d6e739e7b513717deb7cdaf2a6cd5f5ea7d8f8b5721a87612c15b78ff44 Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.073342 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.477701 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57eeed06-46d9-40a6-ad69-0667f362561d" path="/var/lib/kubelet/pods/57eeed06-46d9-40a6-ad69-0667f362561d/volumes" Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.479008 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba66f16d-9993-4cf2-9e9b-29dc12f2f915" path="/var/lib/kubelet/pods/ba66f16d-9993-4cf2-9e9b-29dc12f2f915/volumes" Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.908112 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4c7fc2f-94a0-4558-9e2b-f3578f70f273","Type":"ContainerStarted","Data":"477c38f3d0f87ca787dd0356a5c5555536cea472d2232f6237c4ec314f17d311"} Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.908173 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4c7fc2f-94a0-4558-9e2b-f3578f70f273","Type":"ContainerStarted","Data":"a2000d6e739e7b513717deb7cdaf2a6cd5f5ea7d8f8b5721a87612c15b78ff44"} Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.922101 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7","Type":"ContainerStarted","Data":"818d5943059b1be294fd900bff7493265d9f751e2a7cbcfedddf3f01fd99164f"} Jan 29 12:13:59 crc kubenswrapper[4852]: I0129 12:13:59.922174 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7","Type":"ContainerStarted","Data":"49b03f24661ff317aa78cc254c4a58731713b236418e2759496c18e2b859f52f"} Jan 29 12:14:00 crc kubenswrapper[4852]: I0129 12:14:00.931888 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7","Type":"ContainerStarted","Data":"1a022c41f9a4dba7aa1af297af043a773c50fbd4f4b1128a006e75dc5a0cdd39"} Jan 29 12:14:00 crc kubenswrapper[4852]: I0129 12:14:00.933808 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4c7fc2f-94a0-4558-9e2b-f3578f70f273","Type":"ContainerStarted","Data":"ee9925a0b1a59b2f18a02c9a41f2fc272ba5cc720235c90a7390316b7a640a19"} Jan 29 12:14:00 crc kubenswrapper[4852]: I0129 12:14:00.954489 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.95446979 podStartE2EDuration="3.95446979s" podCreationTimestamp="2026-01-29 12:13:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:14:00.952431461 +0000 UTC m=+5538.169762605" watchObservedRunningTime="2026-01-29 12:14:00.95446979 +0000 UTC m=+5538.171800944" Jan 29 12:14:00 crc kubenswrapper[4852]: I0129 12:14:00.977791 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.977771449 podStartE2EDuration="3.977771449s" podCreationTimestamp="2026-01-29 12:13:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:14:00.975548865 +0000 UTC m=+5538.192879999" watchObservedRunningTime="2026-01-29 12:14:00.977771449 +0000 UTC m=+5538.195102583" Jan 29 12:14:01 crc kubenswrapper[4852]: I0129 12:14:01.039935 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:14:01 crc kubenswrapper[4852]: I0129 12:14:01.040207 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:14:01 crc kubenswrapper[4852]: I0129 12:14:01.086528 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:14:01 crc kubenswrapper[4852]: I0129 12:14:01.990056 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.133540 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.133604 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.371771 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.426992 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-774d9c6bc7-vhqpn"] Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.427269 4852 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" containerName="dnsmasq-dns" containerID="cri-o://2849f04a8e34508e1df10a2a6ca28cfd4152242f9b10d8b90881fe966fba6806" gracePeriod=10 Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.497960 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nr88d"] Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.953712 4852 generic.go:334] "Generic (PLEG): container finished" podID="bd9accf7-0c08-4859-871b-5251747e0edb" containerID="2849f04a8e34508e1df10a2a6ca28cfd4152242f9b10d8b90881fe966fba6806" exitCode=0 Jan 29 12:14:02 crc kubenswrapper[4852]: I0129 12:14:02.953885 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" event={"ID":"bd9accf7-0c08-4859-871b-5251747e0edb","Type":"ContainerDied","Data":"2849f04a8e34508e1df10a2a6ca28cfd4152242f9b10d8b90881fe966fba6806"} Jan 29 12:14:03 crc kubenswrapper[4852]: I0129 12:14:03.174393 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cjwlq" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="registry-server" probeResult="failure" output=< Jan 29 12:14:03 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:14:03 crc kubenswrapper[4852]: > Jan 29 12:14:03 crc kubenswrapper[4852]: I0129 12:14:03.969144 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nr88d" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="registry-server" containerID="cri-o://7406687986df77d97663e69308a0839e3129d7d59077813d3bb269979a882512" gracePeriod=2 Jan 29 12:14:03 crc kubenswrapper[4852]: I0129 12:14:03.969973 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" event={"ID":"bd9accf7-0c08-4859-871b-5251747e0edb","Type":"ContainerDied","Data":"ddbb6ec743a0c14973ac2036a568726c52cadea1cce729010006fc6f19286e56"} Jan 29 12:14:03 crc kubenswrapper[4852]: I0129 12:14:03.970017 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddbb6ec743a0c14973ac2036a568726c52cadea1cce729010006fc6f19286e56" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.000750 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.128616 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fk94k\" (UniqueName: \"kubernetes.io/projected/bd9accf7-0c08-4859-871b-5251747e0edb-kube-api-access-fk94k\") pod \"bd9accf7-0c08-4859-871b-5251747e0edb\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.128677 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-nb\") pod \"bd9accf7-0c08-4859-871b-5251747e0edb\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.128713 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-sb\") pod \"bd9accf7-0c08-4859-871b-5251747e0edb\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.128850 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-dns-svc\") pod \"bd9accf7-0c08-4859-871b-5251747e0edb\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.128959 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-config\") pod \"bd9accf7-0c08-4859-871b-5251747e0edb\" (UID: \"bd9accf7-0c08-4859-871b-5251747e0edb\") " Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.134072 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9accf7-0c08-4859-871b-5251747e0edb-kube-api-access-fk94k" (OuterVolumeSpecName: "kube-api-access-fk94k") pod "bd9accf7-0c08-4859-871b-5251747e0edb" (UID: "bd9accf7-0c08-4859-871b-5251747e0edb"). InnerVolumeSpecName "kube-api-access-fk94k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.171684 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bd9accf7-0c08-4859-871b-5251747e0edb" (UID: "bd9accf7-0c08-4859-871b-5251747e0edb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.180237 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bd9accf7-0c08-4859-871b-5251747e0edb" (UID: "bd9accf7-0c08-4859-871b-5251747e0edb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.181415 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-config" (OuterVolumeSpecName: "config") pod "bd9accf7-0c08-4859-871b-5251747e0edb" (UID: "bd9accf7-0c08-4859-871b-5251747e0edb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.189920 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd9accf7-0c08-4859-871b-5251747e0edb" (UID: "bd9accf7-0c08-4859-871b-5251747e0edb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.230786 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.230835 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.230849 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fk94k\" (UniqueName: \"kubernetes.io/projected/bd9accf7-0c08-4859-871b-5251747e0edb-kube-api-access-fk94k\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.230862 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.230873 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd9accf7-0c08-4859-871b-5251747e0edb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.980516 4852 generic.go:334] "Generic (PLEG): container finished" podID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerID="7406687986df77d97663e69308a0839e3129d7d59077813d3bb269979a882512" exitCode=0 Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.980607 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerDied","Data":"7406687986df77d97663e69308a0839e3129d7d59077813d3bb269979a882512"} Jan 29 12:14:04 crc kubenswrapper[4852]: I0129 12:14:04.981249 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-774d9c6bc7-vhqpn" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.014014 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-774d9c6bc7-vhqpn"] Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.022216 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-774d9c6bc7-vhqpn"] Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.475445 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" path="/var/lib/kubelet/pods/bd9accf7-0c08-4859-871b-5251747e0edb/volumes" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.729747 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.858436 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-utilities\") pod \"ad8213a7-efd7-4e84-86b4-c5af02319009\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.858514 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-catalog-content\") pod \"ad8213a7-efd7-4e84-86b4-c5af02319009\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.858544 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z22xs\" (UniqueName: \"kubernetes.io/projected/ad8213a7-efd7-4e84-86b4-c5af02319009-kube-api-access-z22xs\") pod \"ad8213a7-efd7-4e84-86b4-c5af02319009\" (UID: \"ad8213a7-efd7-4e84-86b4-c5af02319009\") " Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.859385 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-utilities" (OuterVolumeSpecName: "utilities") pod "ad8213a7-efd7-4e84-86b4-c5af02319009" (UID: "ad8213a7-efd7-4e84-86b4-c5af02319009"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.864645 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad8213a7-efd7-4e84-86b4-c5af02319009-kube-api-access-z22xs" (OuterVolumeSpecName: "kube-api-access-z22xs") pod "ad8213a7-efd7-4e84-86b4-c5af02319009" (UID: "ad8213a7-efd7-4e84-86b4-c5af02319009"). InnerVolumeSpecName "kube-api-access-z22xs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.960894 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z22xs\" (UniqueName: \"kubernetes.io/projected/ad8213a7-efd7-4e84-86b4-c5af02319009-kube-api-access-z22xs\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.960938 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.992353 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nr88d" event={"ID":"ad8213a7-efd7-4e84-86b4-c5af02319009","Type":"ContainerDied","Data":"61e635b3b97e6c9ff07de37d05b82e620a8a5379bb69fbc7f0c0947357414efe"} Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.992419 4852 scope.go:117] "RemoveContainer" containerID="7406687986df77d97663e69308a0839e3129d7d59077813d3bb269979a882512" Jan 29 12:14:05 crc kubenswrapper[4852]: I0129 12:14:05.992530 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nr88d" Jan 29 12:14:06 crc kubenswrapper[4852]: I0129 12:14:06.013819 4852 scope.go:117] "RemoveContainer" containerID="08ccb93f46422dd1f933ec7153a7e845e956a52ebee034d78c90b916cff909b6" Jan 29 12:14:06 crc kubenswrapper[4852]: I0129 12:14:06.034758 4852 scope.go:117] "RemoveContainer" containerID="6d437c18a5e5f2c9c905fed30d82c604f1c2e5653edf35ff63e435dc68e76620" Jan 29 12:14:06 crc kubenswrapper[4852]: I0129 12:14:06.136131 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad8213a7-efd7-4e84-86b4-c5af02319009" (UID: "ad8213a7-efd7-4e84-86b4-c5af02319009"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:06 crc kubenswrapper[4852]: I0129 12:14:06.164052 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad8213a7-efd7-4e84-86b4-c5af02319009-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:06 crc kubenswrapper[4852]: I0129 12:14:06.335638 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nr88d"] Jan 29 12:14:06 crc kubenswrapper[4852]: I0129 12:14:06.345544 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nr88d"] Jan 29 12:14:07 crc kubenswrapper[4852]: I0129 12:14:07.474718 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" path="/var/lib/kubelet/pods/ad8213a7-efd7-4e84-86b4-c5af02319009/volumes" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.347464 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.347541 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.380918 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.381750 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.381801 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.400106 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.431011 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:08 crc kubenswrapper[4852]: I0129 12:14:08.448304 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:09 crc kubenswrapper[4852]: I0129 12:14:09.022403 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:09 crc kubenswrapper[4852]: I0129 12:14:09.022806 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-external-api-0" Jan 29 12:14:09 crc kubenswrapper[4852]: I0129 12:14:09.022819 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:09 crc kubenswrapper[4852]: I0129 12:14:09.022828 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 12:14:11 crc kubenswrapper[4852]: I0129 12:14:11.172269 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:11 crc kubenswrapper[4852]: I0129 12:14:11.172698 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 12:14:11 crc kubenswrapper[4852]: I0129 12:14:11.178249 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 12:14:11 crc kubenswrapper[4852]: I0129 12:14:11.316084 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 12:14:11 crc kubenswrapper[4852]: I0129 12:14:11.316206 4852 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 12:14:11 crc kubenswrapper[4852]: I0129 12:14:11.317879 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 12:14:12 crc kubenswrapper[4852]: I0129 12:14:12.178501 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:14:12 crc kubenswrapper[4852]: I0129 12:14:12.229927 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:14:12 crc kubenswrapper[4852]: I0129 12:14:12.418812 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cjwlq"] Jan 29 12:14:14 crc kubenswrapper[4852]: E0129 12:14:14.042157 4852 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.23:59636->38.102.83.23:36165: write tcp 38.102.83.23:59636->38.102.83.23:36165: write: connection reset by peer Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.073946 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cjwlq" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="registry-server" containerID="cri-o://45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579" gracePeriod=2 Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.599988 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.641847 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n8gr\" (UniqueName: \"kubernetes.io/projected/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-kube-api-access-9n8gr\") pod \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.641983 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-catalog-content\") pod \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.642211 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-utilities\") pod \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\" (UID: \"8f6de4b9-d582-46ec-8c45-3df23b9ad76f\") " Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.643320 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-utilities" (OuterVolumeSpecName: "utilities") pod "8f6de4b9-d582-46ec-8c45-3df23b9ad76f" (UID: "8f6de4b9-d582-46ec-8c45-3df23b9ad76f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.647642 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-kube-api-access-9n8gr" (OuterVolumeSpecName: "kube-api-access-9n8gr") pod "8f6de4b9-d582-46ec-8c45-3df23b9ad76f" (UID: "8f6de4b9-d582-46ec-8c45-3df23b9ad76f"). InnerVolumeSpecName "kube-api-access-9n8gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.744382 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.744410 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n8gr\" (UniqueName: \"kubernetes.io/projected/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-kube-api-access-9n8gr\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.777007 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f6de4b9-d582-46ec-8c45-3df23b9ad76f" (UID: "8f6de4b9-d582-46ec-8c45-3df23b9ad76f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:14 crc kubenswrapper[4852]: I0129 12:14:14.846153 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6de4b9-d582-46ec-8c45-3df23b9ad76f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.085898 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerID="45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579" exitCode=0 Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.085951 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerDied","Data":"45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579"} Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.085987 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cjwlq" event={"ID":"8f6de4b9-d582-46ec-8c45-3df23b9ad76f","Type":"ContainerDied","Data":"e99b2c4f1845716bffe79f97c022645831f638022d8093bb6e053f0f4db66426"} Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.086009 4852 scope.go:117] "RemoveContainer" containerID="45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.086005 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cjwlq" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.128985 4852 scope.go:117] "RemoveContainer" containerID="9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.135122 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cjwlq"] Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.169379 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cjwlq"] Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.177330 4852 scope.go:117] "RemoveContainer" containerID="72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.219473 4852 scope.go:117] "RemoveContainer" containerID="45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579" Jan 29 12:14:15 crc kubenswrapper[4852]: E0129 12:14:15.219917 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579\": container with ID starting with 45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579 not found: ID does not exist" containerID="45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.219951 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579"} err="failed to get container status \"45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579\": rpc error: code = NotFound desc = could not find container \"45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579\": container with ID starting with 45b6cf0b53fc51500c75b26e4662eaf51ee826f1b5f8f216207dc8a009d7b579 not found: ID does not exist" Jan 29 12:14:15 crc 
kubenswrapper[4852]: I0129 12:14:15.219972 4852 scope.go:117] "RemoveContainer" containerID="9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f" Jan 29 12:14:15 crc kubenswrapper[4852]: E0129 12:14:15.220221 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f\": container with ID starting with 9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f not found: ID does not exist" containerID="9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.220321 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f"} err="failed to get container status \"9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f\": rpc error: code = NotFound desc = could not find container \"9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f\": container with ID starting with 9d8a18a814c5c19471cee88d64e182b0351fb9d0877df30c3b29f9bffea5cc6f not found: ID does not exist" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.220359 4852 scope.go:117] "RemoveContainer" containerID="72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150" Jan 29 12:14:15 crc kubenswrapper[4852]: E0129 12:14:15.220752 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150\": container with ID starting with 72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150 not found: ID does not exist" containerID="72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.220859 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150"} err="failed to get container status \"72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150\": rpc error: code = NotFound desc = could not find container \"72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150\": container with ID starting with 72178f3233c3db64c8e40e73a9a74f9df73bc2e53fccc280ed7d4c5987bcd150 not found: ID does not exist" Jan 29 12:14:15 crc kubenswrapper[4852]: I0129 12:14:15.480105 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" path="/var/lib/kubelet/pods/8f6de4b9-d582-46ec-8c45-3df23b9ad76f/volumes" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.638571 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-5g48v"] Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.639851 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="registry-server" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.639879 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="registry-server" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.639899 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" containerName="dnsmasq-dns" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.639910 4852 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" containerName="dnsmasq-dns" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.639928 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="registry-server" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.639937 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="registry-server" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.639965 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="extract-utilities" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.639974 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="extract-utilities" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.639997 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="extract-utilities" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640006 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="extract-utilities" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.640029 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="extract-content" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640039 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="extract-content" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.640063 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" containerName="init" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640073 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" containerName="init" Jan 29 12:14:17 crc kubenswrapper[4852]: E0129 12:14:17.640105 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="extract-content" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640114 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="extract-content" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640397 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9accf7-0c08-4859-871b-5251747e0edb" containerName="dnsmasq-dns" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640427 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f6de4b9-d582-46ec-8c45-3df23b9ad76f" containerName="registry-server" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.640448 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad8213a7-efd7-4e84-86b4-c5af02319009" containerName="registry-server" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.641509 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5g48v" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.651921 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f77b-account-create-update-rjxjs"] Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.653183 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.659301 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5g48v"] Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.663798 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f77b-account-create-update-rjxjs"] Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.664289 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.712486 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcm8m\" (UniqueName: \"kubernetes.io/projected/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-kube-api-access-bcm8m\") pod \"placement-f77b-account-create-update-rjxjs\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.712559 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7mmx\" (UniqueName: \"kubernetes.io/projected/06f552c0-01ac-4362-9493-19f42cbfcbee-kube-api-access-l7mmx\") pod \"placement-db-create-5g48v\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " pod="openstack/placement-db-create-5g48v" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.712726 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f552c0-01ac-4362-9493-19f42cbfcbee-operator-scripts\") pod \"placement-db-create-5g48v\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " pod="openstack/placement-db-create-5g48v" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.712842 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-operator-scripts\") pod \"placement-f77b-account-create-update-rjxjs\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.814291 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-operator-scripts\") pod \"placement-f77b-account-create-update-rjxjs\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.814379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcm8m\" (UniqueName: \"kubernetes.io/projected/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-kube-api-access-bcm8m\") pod \"placement-f77b-account-create-update-rjxjs\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.814441 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7mmx\" (UniqueName: \"kubernetes.io/projected/06f552c0-01ac-4362-9493-19f42cbfcbee-kube-api-access-l7mmx\") pod \"placement-db-create-5g48v\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " pod="openstack/placement-db-create-5g48v" Jan 29 
12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.814538 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f552c0-01ac-4362-9493-19f42cbfcbee-operator-scripts\") pod \"placement-db-create-5g48v\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " pod="openstack/placement-db-create-5g48v" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.815095 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-operator-scripts\") pod \"placement-f77b-account-create-update-rjxjs\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.815671 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f552c0-01ac-4362-9493-19f42cbfcbee-operator-scripts\") pod \"placement-db-create-5g48v\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " pod="openstack/placement-db-create-5g48v" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.843234 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7mmx\" (UniqueName: \"kubernetes.io/projected/06f552c0-01ac-4362-9493-19f42cbfcbee-kube-api-access-l7mmx\") pod \"placement-db-create-5g48v\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " pod="openstack/placement-db-create-5g48v" Jan 29 12:14:17 crc kubenswrapper[4852]: I0129 12:14:17.843248 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcm8m\" (UniqueName: \"kubernetes.io/projected/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-kube-api-access-bcm8m\") pod \"placement-f77b-account-create-update-rjxjs\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:18 crc kubenswrapper[4852]: I0129 12:14:18.023627 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5g48v" Jan 29 12:14:18 crc kubenswrapper[4852]: I0129 12:14:18.031779 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:18 crc kubenswrapper[4852]: W0129 12:14:18.497112 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0f22b7d_83b7_4d5c_b56b_98a1d4edfa03.slice/crio-2104dec76e50bb38baf98899504604c3a4ac44e439324b394644eca29c92e82f WatchSource:0}: Error finding container 2104dec76e50bb38baf98899504604c3a4ac44e439324b394644eca29c92e82f: Status 404 returned error can't find the container with id 2104dec76e50bb38baf98899504604c3a4ac44e439324b394644eca29c92e82f Jan 29 12:14:18 crc kubenswrapper[4852]: I0129 12:14:18.512130 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f77b-account-create-update-rjxjs"] Jan 29 12:14:18 crc kubenswrapper[4852]: I0129 12:14:18.565932 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5g48v"] Jan 29 12:14:18 crc kubenswrapper[4852]: W0129 12:14:18.568339 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06f552c0_01ac_4362_9493_19f42cbfcbee.slice/crio-22c7e8688141e39865eadf9f05633ae489d3c860becb95cad530a54a38f346d9 WatchSource:0}: Error finding container 22c7e8688141e39865eadf9f05633ae489d3c860becb95cad530a54a38f346d9: Status 404 returned error can't find the container with id 22c7e8688141e39865eadf9f05633ae489d3c860becb95cad530a54a38f346d9 Jan 29 12:14:19 crc kubenswrapper[4852]: I0129 12:14:19.136547 4852 generic.go:334] "Generic (PLEG): container finished" podID="d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" containerID="5d27a78500353e928d3111a281b88b7cbd9e9bb3fea8cb1925e1eff87788016e" exitCode=0 Jan 29 12:14:19 crc kubenswrapper[4852]: I0129 12:14:19.136610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f77b-account-create-update-rjxjs" event={"ID":"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03","Type":"ContainerDied","Data":"5d27a78500353e928d3111a281b88b7cbd9e9bb3fea8cb1925e1eff87788016e"} Jan 29 12:14:19 crc kubenswrapper[4852]: I0129 12:14:19.137010 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f77b-account-create-update-rjxjs" event={"ID":"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03","Type":"ContainerStarted","Data":"2104dec76e50bb38baf98899504604c3a4ac44e439324b394644eca29c92e82f"} Jan 29 12:14:19 crc kubenswrapper[4852]: I0129 12:14:19.139922 4852 generic.go:334] "Generic (PLEG): container finished" podID="06f552c0-01ac-4362-9493-19f42cbfcbee" containerID="675351371d42be5732231c876d522e15356c0483d055226ac9029470a8952029" exitCode=0 Jan 29 12:14:19 crc kubenswrapper[4852]: I0129 12:14:19.139983 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5g48v" event={"ID":"06f552c0-01ac-4362-9493-19f42cbfcbee","Type":"ContainerDied","Data":"675351371d42be5732231c876d522e15356c0483d055226ac9029470a8952029"} Jan 29 12:14:19 crc kubenswrapper[4852]: I0129 12:14:19.140023 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5g48v" event={"ID":"06f552c0-01ac-4362-9493-19f42cbfcbee","Type":"ContainerStarted","Data":"22c7e8688141e39865eadf9f05633ae489d3c860becb95cad530a54a38f346d9"} Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.648528 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.652661 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5g48v" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.668857 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-operator-scripts\") pod \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.668909 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f552c0-01ac-4362-9493-19f42cbfcbee-operator-scripts\") pod \"06f552c0-01ac-4362-9493-19f42cbfcbee\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.668972 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7mmx\" (UniqueName: \"kubernetes.io/projected/06f552c0-01ac-4362-9493-19f42cbfcbee-kube-api-access-l7mmx\") pod \"06f552c0-01ac-4362-9493-19f42cbfcbee\" (UID: \"06f552c0-01ac-4362-9493-19f42cbfcbee\") " Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.669083 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcm8m\" (UniqueName: \"kubernetes.io/projected/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-kube-api-access-bcm8m\") pod \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\" (UID: \"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03\") " Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.670391 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" (UID: "d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.677551 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06f552c0-01ac-4362-9493-19f42cbfcbee-kube-api-access-l7mmx" (OuterVolumeSpecName: "kube-api-access-l7mmx") pod "06f552c0-01ac-4362-9493-19f42cbfcbee" (UID: "06f552c0-01ac-4362-9493-19f42cbfcbee"). InnerVolumeSpecName "kube-api-access-l7mmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.670910 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06f552c0-01ac-4362-9493-19f42cbfcbee-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "06f552c0-01ac-4362-9493-19f42cbfcbee" (UID: "06f552c0-01ac-4362-9493-19f42cbfcbee"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.679901 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-kube-api-access-bcm8m" (OuterVolumeSpecName: "kube-api-access-bcm8m") pod "d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" (UID: "d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03"). InnerVolumeSpecName "kube-api-access-bcm8m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.770959 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7mmx\" (UniqueName: \"kubernetes.io/projected/06f552c0-01ac-4362-9493-19f42cbfcbee-kube-api-access-l7mmx\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.771001 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcm8m\" (UniqueName: \"kubernetes.io/projected/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-kube-api-access-bcm8m\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.771021 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:20 crc kubenswrapper[4852]: I0129 12:14:20.771031 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/06f552c0-01ac-4362-9493-19f42cbfcbee-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:21 crc kubenswrapper[4852]: I0129 12:14:21.158412 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f77b-account-create-update-rjxjs" event={"ID":"d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03","Type":"ContainerDied","Data":"2104dec76e50bb38baf98899504604c3a4ac44e439324b394644eca29c92e82f"} Jan 29 12:14:21 crc kubenswrapper[4852]: I0129 12:14:21.158782 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2104dec76e50bb38baf98899504604c3a4ac44e439324b394644eca29c92e82f" Jan 29 12:14:21 crc kubenswrapper[4852]: I0129 12:14:21.158474 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f77b-account-create-update-rjxjs" Jan 29 12:14:21 crc kubenswrapper[4852]: I0129 12:14:21.160182 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5g48v" event={"ID":"06f552c0-01ac-4362-9493-19f42cbfcbee","Type":"ContainerDied","Data":"22c7e8688141e39865eadf9f05633ae489d3c860becb95cad530a54a38f346d9"} Jan 29 12:14:21 crc kubenswrapper[4852]: I0129 12:14:21.160216 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22c7e8688141e39865eadf9f05633ae489d3c860becb95cad530a54a38f346d9" Jan 29 12:14:21 crc kubenswrapper[4852]: I0129 12:14:21.160243 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-5g48v" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.896766 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-ghr6b"] Jan 29 12:14:22 crc kubenswrapper[4852]: E0129 12:14:22.897202 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06f552c0-01ac-4362-9493-19f42cbfcbee" containerName="mariadb-database-create" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.897221 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="06f552c0-01ac-4362-9493-19f42cbfcbee" containerName="mariadb-database-create" Jan 29 12:14:22 crc kubenswrapper[4852]: E0129 12:14:22.897260 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" containerName="mariadb-account-create-update" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.897266 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" containerName="mariadb-account-create-update" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.897438 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="06f552c0-01ac-4362-9493-19f42cbfcbee" containerName="mariadb-database-create" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.897462 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" containerName="mariadb-account-create-update" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.898167 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.900449 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.900913 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nft4b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.902308 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.909504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5sfd\" (UniqueName: \"kubernetes.io/projected/7a3ec239-c977-4064-9544-1075adddf3d1-kube-api-access-s5sfd\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.909601 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-scripts\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.909664 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a3ec239-c977-4064-9544-1075adddf3d1-logs\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.909713 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-config-data\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.909734 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-combined-ca-bundle\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.911328 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77c4d4b58c-2ghdr"] Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.913246 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.939785 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ghr6b"] Jan 29 12:14:22 crc kubenswrapper[4852]: I0129 12:14:22.951771 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77c4d4b58c-2ghdr"] Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011488 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a3ec239-c977-4064-9544-1075adddf3d1-logs\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011558 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsjrl\" (UniqueName: \"kubernetes.io/projected/f722387b-877f-46ff-9674-8bf258e77be7-kube-api-access-gsjrl\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011612 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-dns-svc\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011650 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-sb\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011678 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-config\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011714 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-config-data\") pod \"placement-db-sync-ghr6b\" (UID: 
\"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011741 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-combined-ca-bundle\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011778 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-nb\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011820 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5sfd\" (UniqueName: \"kubernetes.io/projected/7a3ec239-c977-4064-9544-1075adddf3d1-kube-api-access-s5sfd\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.011883 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-scripts\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.012492 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a3ec239-c977-4064-9544-1075adddf3d1-logs\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.018267 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-scripts\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.019108 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-combined-ca-bundle\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.023447 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-config-data\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.031000 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5sfd\" (UniqueName: \"kubernetes.io/projected/7a3ec239-c977-4064-9544-1075adddf3d1-kube-api-access-s5sfd\") pod \"placement-db-sync-ghr6b\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.112932 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-nb\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.113081 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsjrl\" (UniqueName: \"kubernetes.io/projected/f722387b-877f-46ff-9674-8bf258e77be7-kube-api-access-gsjrl\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.113114 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-dns-svc\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.113147 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-sb\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.113172 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-config\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.113960 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-nb\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.114059 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-config\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.114197 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-dns-svc\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.114430 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-sb\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.132067 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsjrl\" (UniqueName: 
\"kubernetes.io/projected/f722387b-877f-46ff-9674-8bf258e77be7-kube-api-access-gsjrl\") pod \"dnsmasq-dns-77c4d4b58c-2ghdr\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.217078 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.236093 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.756049 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-ghr6b"] Jan 29 12:14:23 crc kubenswrapper[4852]: I0129 12:14:23.763126 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77c4d4b58c-2ghdr"] Jan 29 12:14:24 crc kubenswrapper[4852]: I0129 12:14:24.190462 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ghr6b" event={"ID":"7a3ec239-c977-4064-9544-1075adddf3d1","Type":"ContainerStarted","Data":"3f82b0084eef0b8949f74951e617a03c52f60e51c4815c8130f1e258fa227b84"} Jan 29 12:14:24 crc kubenswrapper[4852]: I0129 12:14:24.190540 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ghr6b" event={"ID":"7a3ec239-c977-4064-9544-1075adddf3d1","Type":"ContainerStarted","Data":"bee08500822d99f8a2b6818deb67ea3f1d25774a9fc3bc3115199de6a11efa27"} Jan 29 12:14:24 crc kubenswrapper[4852]: I0129 12:14:24.193153 4852 generic.go:334] "Generic (PLEG): container finished" podID="f722387b-877f-46ff-9674-8bf258e77be7" containerID="c42b3be31da1746ffd5bdb7bd09052ca758af5d5c333bf0227cf280d8a951887" exitCode=0 Jan 29 12:14:24 crc kubenswrapper[4852]: I0129 12:14:24.193187 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" event={"ID":"f722387b-877f-46ff-9674-8bf258e77be7","Type":"ContainerDied","Data":"c42b3be31da1746ffd5bdb7bd09052ca758af5d5c333bf0227cf280d8a951887"} Jan 29 12:14:24 crc kubenswrapper[4852]: I0129 12:14:24.193224 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" event={"ID":"f722387b-877f-46ff-9674-8bf258e77be7","Type":"ContainerStarted","Data":"314818ed3556dfe0e186cf0d8e192158821234b9bd87350ee5d9bcaf5769914b"} Jan 29 12:14:24 crc kubenswrapper[4852]: I0129 12:14:24.212613 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-ghr6b" podStartSLOduration=2.212571965 podStartE2EDuration="2.212571965s" podCreationTimestamp="2026-01-29 12:14:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:14:24.209231353 +0000 UTC m=+5561.426562487" watchObservedRunningTime="2026-01-29 12:14:24.212571965 +0000 UTC m=+5561.429903099" Jan 29 12:14:25 crc kubenswrapper[4852]: I0129 12:14:25.206346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" event={"ID":"f722387b-877f-46ff-9674-8bf258e77be7","Type":"ContainerStarted","Data":"7600c84cdfbf0bb92cb25bd890e47f1d59cdce326e845d58dcf8b790fe52851b"} Jan 29 12:14:25 crc kubenswrapper[4852]: I0129 12:14:25.206767 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:25 crc kubenswrapper[4852]: I0129 12:14:25.235575 4852 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" podStartSLOduration=3.2355549200000002 podStartE2EDuration="3.23555492s" podCreationTimestamp="2026-01-29 12:14:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:14:25.226622353 +0000 UTC m=+5562.443953507" watchObservedRunningTime="2026-01-29 12:14:25.23555492 +0000 UTC m=+5562.452886064" Jan 29 12:14:26 crc kubenswrapper[4852]: I0129 12:14:26.218346 4852 generic.go:334] "Generic (PLEG): container finished" podID="7a3ec239-c977-4064-9544-1075adddf3d1" containerID="3f82b0084eef0b8949f74951e617a03c52f60e51c4815c8130f1e258fa227b84" exitCode=0 Jan 29 12:14:26 crc kubenswrapper[4852]: I0129 12:14:26.219736 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ghr6b" event={"ID":"7a3ec239-c977-4064-9544-1075adddf3d1","Type":"ContainerDied","Data":"3f82b0084eef0b8949f74951e617a03c52f60e51c4815c8130f1e258fa227b84"} Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.558607 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.721739 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-config-data\") pod \"7a3ec239-c977-4064-9544-1075adddf3d1\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.721913 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5sfd\" (UniqueName: \"kubernetes.io/projected/7a3ec239-c977-4064-9544-1075adddf3d1-kube-api-access-s5sfd\") pod \"7a3ec239-c977-4064-9544-1075adddf3d1\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.721987 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a3ec239-c977-4064-9544-1075adddf3d1-logs\") pod \"7a3ec239-c977-4064-9544-1075adddf3d1\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.722044 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-combined-ca-bundle\") pod \"7a3ec239-c977-4064-9544-1075adddf3d1\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.722101 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-scripts\") pod \"7a3ec239-c977-4064-9544-1075adddf3d1\" (UID: \"7a3ec239-c977-4064-9544-1075adddf3d1\") " Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.722578 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a3ec239-c977-4064-9544-1075adddf3d1-logs" (OuterVolumeSpecName: "logs") pod "7a3ec239-c977-4064-9544-1075adddf3d1" (UID: "7a3ec239-c977-4064-9544-1075adddf3d1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.727129 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-scripts" (OuterVolumeSpecName: "scripts") pod "7a3ec239-c977-4064-9544-1075adddf3d1" (UID: "7a3ec239-c977-4064-9544-1075adddf3d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.727730 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a3ec239-c977-4064-9544-1075adddf3d1-kube-api-access-s5sfd" (OuterVolumeSpecName: "kube-api-access-s5sfd") pod "7a3ec239-c977-4064-9544-1075adddf3d1" (UID: "7a3ec239-c977-4064-9544-1075adddf3d1"). InnerVolumeSpecName "kube-api-access-s5sfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.748183 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-config-data" (OuterVolumeSpecName: "config-data") pod "7a3ec239-c977-4064-9544-1075adddf3d1" (UID: "7a3ec239-c977-4064-9544-1075adddf3d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.751761 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a3ec239-c977-4064-9544-1075adddf3d1" (UID: "7a3ec239-c977-4064-9544-1075adddf3d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.824265 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.824307 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5sfd\" (UniqueName: \"kubernetes.io/projected/7a3ec239-c977-4064-9544-1075adddf3d1-kube-api-access-s5sfd\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.824318 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a3ec239-c977-4064-9544-1075adddf3d1-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.824326 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:27 crc kubenswrapper[4852]: I0129 12:14:27.824335 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a3ec239-c977-4064-9544-1075adddf3d1-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.234463 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-ghr6b" event={"ID":"7a3ec239-c977-4064-9544-1075adddf3d1","Type":"ContainerDied","Data":"bee08500822d99f8a2b6818deb67ea3f1d25774a9fc3bc3115199de6a11efa27"} Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.234779 4852 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="bee08500822d99f8a2b6818deb67ea3f1d25774a9fc3bc3115199de6a11efa27" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.234506 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-ghr6b" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.314693 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7cfd49c966-l87cv"] Jan 29 12:14:28 crc kubenswrapper[4852]: E0129 12:14:28.315124 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a3ec239-c977-4064-9544-1075adddf3d1" containerName="placement-db-sync" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.315143 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a3ec239-c977-4064-9544-1075adddf3d1" containerName="placement-db-sync" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.315357 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a3ec239-c977-4064-9544-1075adddf3d1" containerName="placement-db-sync" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.316875 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.321355 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.321771 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.322127 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nft4b" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.330322 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7cfd49c966-l87cv"] Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.440720 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-scripts\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.441019 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-logs\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.441386 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-combined-ca-bundle\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.441428 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-config-data\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.441574 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7gv9\" (UniqueName: \"kubernetes.io/projected/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-kube-api-access-t7gv9\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.543015 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-combined-ca-bundle\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.543073 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-config-data\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.543120 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7gv9\" (UniqueName: \"kubernetes.io/projected/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-kube-api-access-t7gv9\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.543182 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-scripts\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.543228 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-logs\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.543785 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-logs\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.548058 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-scripts\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.548852 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-combined-ca-bundle\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.550767 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-config-data\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.574189 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7gv9\" (UniqueName: \"kubernetes.io/projected/9c20bbfd-ef79-4793-bd6f-fe5bc90162d0-kube-api-access-t7gv9\") pod \"placement-7cfd49c966-l87cv\" (UID: \"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0\") " pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:28 crc kubenswrapper[4852]: I0129 12:14:28.643354 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.104442 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7cfd49c966-l87cv"] Jan 29 12:14:29 crc kubenswrapper[4852]: W0129 12:14:29.112152 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c20bbfd_ef79_4793_bd6f_fe5bc90162d0.slice/crio-5f9c368d26d35ba563a1683f988776a92452bd5bdfd4ca01297527b95e05ed90 WatchSource:0}: Error finding container 5f9c368d26d35ba563a1683f988776a92452bd5bdfd4ca01297527b95e05ed90: Status 404 returned error can't find the container with id 5f9c368d26d35ba563a1683f988776a92452bd5bdfd4ca01297527b95e05ed90 Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.244453 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7cfd49c966-l87cv" event={"ID":"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0","Type":"ContainerStarted","Data":"5f9c368d26d35ba563a1683f988776a92452bd5bdfd4ca01297527b95e05ed90"} Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.728239 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k5f9q"] Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.730462 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.740175 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k5f9q"] Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.888620 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-catalog-content\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.888957 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h26xh\" (UniqueName: \"kubernetes.io/projected/68ccb403-148e-479d-b6fb-8d95abfd92c2-kube-api-access-h26xh\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.889089 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-utilities\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.990371 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-catalog-content\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.990426 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h26xh\" (UniqueName: \"kubernetes.io/projected/68ccb403-148e-479d-b6fb-8d95abfd92c2-kube-api-access-h26xh\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.990508 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-utilities\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.990983 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-catalog-content\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:29 crc kubenswrapper[4852]: I0129 12:14:29.990988 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-utilities\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.012229 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-h26xh\" (UniqueName: \"kubernetes.io/projected/68ccb403-148e-479d-b6fb-8d95abfd92c2-kube-api-access-h26xh\") pod \"certified-operators-k5f9q\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.101004 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.267720 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7cfd49c966-l87cv" event={"ID":"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0","Type":"ContainerStarted","Data":"1756b6ca2665c0d93627e2f1eccb5a88bbf7abf8689732565f8f30e0a482da3f"} Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.267765 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7cfd49c966-l87cv" event={"ID":"9c20bbfd-ef79-4793-bd6f-fe5bc90162d0","Type":"ContainerStarted","Data":"3042818ed106a170b949d5a8cae02e198938fa8bdd28d2e66cbf36bdac108f83"} Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.268455 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.268808 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.290221 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7cfd49c966-l87cv" podStartSLOduration=2.290200161 podStartE2EDuration="2.290200161s" podCreationTimestamp="2026-01-29 12:14:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:14:30.289049644 +0000 UTC m=+5567.506380788" watchObservedRunningTime="2026-01-29 12:14:30.290200161 +0000 UTC m=+5567.507531295" Jan 29 12:14:30 crc kubenswrapper[4852]: I0129 12:14:30.655725 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k5f9q"] Jan 29 12:14:31 crc kubenswrapper[4852]: I0129 12:14:31.280708 4852 generic.go:334] "Generic (PLEG): container finished" podID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerID="0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00" exitCode=0 Jan 29 12:14:31 crc kubenswrapper[4852]: I0129 12:14:31.280797 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerDied","Data":"0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00"} Jan 29 12:14:31 crc kubenswrapper[4852]: I0129 12:14:31.281399 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerStarted","Data":"69f2bc235fdd922db8bd8db50cc0afc9aaaa1e7365673451307f985a23db254d"} Jan 29 12:14:32 crc kubenswrapper[4852]: I0129 12:14:32.293772 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerStarted","Data":"fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7"} Jan 29 12:14:33 crc kubenswrapper[4852]: I0129 12:14:33.237812 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:14:33 crc kubenswrapper[4852]: I0129 12:14:33.307112 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-ff8fb85f5-ztczg"] Jan 29 12:14:33 crc kubenswrapper[4852]: I0129 12:14:33.307371 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerName="dnsmasq-dns" containerID="cri-o://82ee2f082f25cf881e48c0dc5a07227b1115d3228ec25ed656eb037a912baa5e" gracePeriod=10 Jan 29 12:14:33 crc kubenswrapper[4852]: I0129 12:14:33.312535 4852 generic.go:334] "Generic (PLEG): container finished" podID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerID="fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7" exitCode=0 Jan 29 12:14:33 crc kubenswrapper[4852]: I0129 12:14:33.312595 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerDied","Data":"fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7"} Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.323761 4852 generic.go:334] "Generic (PLEG): container finished" podID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerID="82ee2f082f25cf881e48c0dc5a07227b1115d3228ec25ed656eb037a912baa5e" exitCode=0 Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.323799 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" event={"ID":"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd","Type":"ContainerDied","Data":"82ee2f082f25cf881e48c0dc5a07227b1115d3228ec25ed656eb037a912baa5e"} Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.324389 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" event={"ID":"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd","Type":"ContainerDied","Data":"32d18db29b0be7ea9ca5fc25b74e95a6cb2f1591ecb22e0c5713805d876f386e"} Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.324412 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32d18db29b0be7ea9ca5fc25b74e95a6cb2f1591ecb22e0c5713805d876f386e" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.326208 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.327436 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerStarted","Data":"4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60"} Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.363218 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k5f9q" podStartSLOduration=2.653466655 podStartE2EDuration="5.363200034s" podCreationTimestamp="2026-01-29 12:14:29 +0000 UTC" firstStartedPulling="2026-01-29 12:14:31.285700347 +0000 UTC m=+5568.503031511" lastFinishedPulling="2026-01-29 12:14:33.995433756 +0000 UTC m=+5571.212764890" observedRunningTime="2026-01-29 12:14:34.358419977 +0000 UTC m=+5571.575751151" watchObservedRunningTime="2026-01-29 12:14:34.363200034 +0000 UTC m=+5571.580531168" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.394365 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs92r\" (UniqueName: \"kubernetes.io/projected/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-kube-api-access-qs92r\") pod \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.394437 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-nb\") pod \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.411149 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-kube-api-access-qs92r" (OuterVolumeSpecName: "kube-api-access-qs92r") pod "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" (UID: "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd"). InnerVolumeSpecName "kube-api-access-qs92r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.444877 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" (UID: "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.495929 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-config\") pod \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.495982 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-sb\") pod \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.496046 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-dns-svc\") pod \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\" (UID: \"d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd\") " Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.496837 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs92r\" (UniqueName: \"kubernetes.io/projected/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-kube-api-access-qs92r\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.496932 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.534294 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-config" (OuterVolumeSpecName: "config") pod "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" (UID: "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.538084 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" (UID: "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.539079 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" (UID: "d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.598857 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.599100 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:34 crc kubenswrapper[4852]: I0129 12:14:34.599183 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:35 crc kubenswrapper[4852]: I0129 12:14:35.336144 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-ff8fb85f5-ztczg" Jan 29 12:14:35 crc kubenswrapper[4852]: I0129 12:14:35.365914 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-ff8fb85f5-ztczg"] Jan 29 12:14:35 crc kubenswrapper[4852]: I0129 12:14:35.373824 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-ff8fb85f5-ztczg"] Jan 29 12:14:35 crc kubenswrapper[4852]: I0129 12:14:35.475747 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" path="/var/lib/kubelet/pods/d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd/volumes" Jan 29 12:14:40 crc kubenswrapper[4852]: I0129 12:14:40.101957 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:40 crc kubenswrapper[4852]: I0129 12:14:40.102190 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:40 crc kubenswrapper[4852]: I0129 12:14:40.170931 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:40 crc kubenswrapper[4852]: I0129 12:14:40.445784 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:40 crc kubenswrapper[4852]: I0129 12:14:40.503213 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k5f9q"] Jan 29 12:14:42 crc kubenswrapper[4852]: I0129 12:14:42.415700 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k5f9q" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="registry-server" containerID="cri-o://4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60" gracePeriod=2 Jan 29 12:14:42 crc kubenswrapper[4852]: I0129 12:14:42.952118 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.072308 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-catalog-content\") pod \"68ccb403-148e-479d-b6fb-8d95abfd92c2\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.072599 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h26xh\" (UniqueName: \"kubernetes.io/projected/68ccb403-148e-479d-b6fb-8d95abfd92c2-kube-api-access-h26xh\") pod \"68ccb403-148e-479d-b6fb-8d95abfd92c2\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.072653 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-utilities\") pod \"68ccb403-148e-479d-b6fb-8d95abfd92c2\" (UID: \"68ccb403-148e-479d-b6fb-8d95abfd92c2\") " Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.073421 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-utilities" (OuterVolumeSpecName: "utilities") pod "68ccb403-148e-479d-b6fb-8d95abfd92c2" (UID: "68ccb403-148e-479d-b6fb-8d95abfd92c2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.078011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68ccb403-148e-479d-b6fb-8d95abfd92c2-kube-api-access-h26xh" (OuterVolumeSpecName: "kube-api-access-h26xh") pod "68ccb403-148e-479d-b6fb-8d95abfd92c2" (UID: "68ccb403-148e-479d-b6fb-8d95abfd92c2"). InnerVolumeSpecName "kube-api-access-h26xh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.174719 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h26xh\" (UniqueName: \"kubernetes.io/projected/68ccb403-148e-479d-b6fb-8d95abfd92c2-kube-api-access-h26xh\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.174933 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.426018 4852 generic.go:334] "Generic (PLEG): container finished" podID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerID="4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60" exitCode=0 Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.426059 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k5f9q" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.426068 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerDied","Data":"4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60"} Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.426098 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k5f9q" event={"ID":"68ccb403-148e-479d-b6fb-8d95abfd92c2","Type":"ContainerDied","Data":"69f2bc235fdd922db8bd8db50cc0afc9aaaa1e7365673451307f985a23db254d"} Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.426518 4852 scope.go:117] "RemoveContainer" containerID="4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.460809 4852 scope.go:117] "RemoveContainer" containerID="fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.476861 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68ccb403-148e-479d-b6fb-8d95abfd92c2" (UID: "68ccb403-148e-479d-b6fb-8d95abfd92c2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.484867 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68ccb403-148e-479d-b6fb-8d95abfd92c2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.487557 4852 scope.go:117] "RemoveContainer" containerID="0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.547548 4852 scope.go:117] "RemoveContainer" containerID="4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60" Jan 29 12:14:43 crc kubenswrapper[4852]: E0129 12:14:43.548338 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60\": container with ID starting with 4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60 not found: ID does not exist" containerID="4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.548392 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60"} err="failed to get container status \"4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60\": rpc error: code = NotFound desc = could not find container \"4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60\": container with ID starting with 4ddda4dbcea12efa303fc2c6d5d781c37fb2e112425eaac8c5f86ef7d377da60 not found: ID does not exist" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.548422 4852 scope.go:117] "RemoveContainer" containerID="fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7" Jan 29 12:14:43 crc kubenswrapper[4852]: E0129 12:14:43.548953 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7\": container with ID starting with fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7 not found: ID does not exist" containerID="fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.549002 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7"} err="failed to get container status \"fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7\": rpc error: code = NotFound desc = could not find container \"fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7\": container with ID starting with fdb617ef41c7856793320df9a648ea9ca764c10003eb39782bddd59d1288a8a7 not found: ID does not exist" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.549035 4852 scope.go:117] "RemoveContainer" containerID="0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00" Jan 29 12:14:43 crc kubenswrapper[4852]: E0129 12:14:43.549577 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00\": container with ID starting with 0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00 not found: ID does not exist" containerID="0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.549693 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00"} err="failed to get container status \"0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00\": rpc error: code = NotFound desc = could not find container \"0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00\": container with ID starting with 0041dca7817846fe3ee971fbc9db08ef28b225360ad893bc810f7f92f045bc00 not found: ID does not exist" Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.765798 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k5f9q"] Jan 29 12:14:43 crc kubenswrapper[4852]: I0129 12:14:43.777863 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k5f9q"] Jan 29 12:14:45 crc kubenswrapper[4852]: I0129 12:14:45.475633 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" path="/var/lib/kubelet/pods/68ccb403-148e-479d-b6fb-8d95abfd92c2/volumes" Jan 29 12:14:59 crc kubenswrapper[4852]: I0129 12:14:59.670972 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:14:59 crc kubenswrapper[4852]: I0129 12:14:59.704563 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7cfd49c966-l87cv" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.146318 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn"] Jan 29 12:15:00 crc kubenswrapper[4852]: E0129 12:15:00.146762 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="extract-utilities" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.146782 4852 
state_mem.go:107] "Deleted CPUSet assignment" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="extract-utilities" Jan 29 12:15:00 crc kubenswrapper[4852]: E0129 12:15:00.146803 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="registry-server" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.146810 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="registry-server" Jan 29 12:15:00 crc kubenswrapper[4852]: E0129 12:15:00.146818 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="extract-content" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.146827 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="extract-content" Jan 29 12:15:00 crc kubenswrapper[4852]: E0129 12:15:00.146838 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerName="init" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.146845 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerName="init" Jan 29 12:15:00 crc kubenswrapper[4852]: E0129 12:15:00.146861 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerName="dnsmasq-dns" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.146868 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerName="dnsmasq-dns" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.147055 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="68ccb403-148e-479d-b6fb-8d95abfd92c2" containerName="registry-server" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.147077 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9f1f324-73df-4de1-b3de-2e5b9e8ac4cd" containerName="dnsmasq-dns" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.147783 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.149663 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.150021 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.164280 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn"] Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.304276 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10d09a8a-3459-4001-a749-d405816b1b52-secret-volume\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.304338 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10d09a8a-3459-4001-a749-d405816b1b52-config-volume\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.304376 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg7fk\" (UniqueName: \"kubernetes.io/projected/10d09a8a-3459-4001-a749-d405816b1b52-kube-api-access-gg7fk\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.407274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10d09a8a-3459-4001-a749-d405816b1b52-secret-volume\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.407325 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10d09a8a-3459-4001-a749-d405816b1b52-config-volume\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.407363 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg7fk\" (UniqueName: \"kubernetes.io/projected/10d09a8a-3459-4001-a749-d405816b1b52-kube-api-access-gg7fk\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.408602 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10d09a8a-3459-4001-a749-d405816b1b52-config-volume\") pod 
\"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.419500 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10d09a8a-3459-4001-a749-d405816b1b52-secret-volume\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.437286 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg7fk\" (UniqueName: \"kubernetes.io/projected/10d09a8a-3459-4001-a749-d405816b1b52-kube-api-access-gg7fk\") pod \"collect-profiles-29494815-8x2hn\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:00 crc kubenswrapper[4852]: I0129 12:15:00.518675 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:01 crc kubenswrapper[4852]: I0129 12:15:01.006851 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn"] Jan 29 12:15:01 crc kubenswrapper[4852]: I0129 12:15:01.618941 4852 generic.go:334] "Generic (PLEG): container finished" podID="10d09a8a-3459-4001-a749-d405816b1b52" containerID="38d05d57b84e632f95a1aa9a5b62365dd85af5e7b6d56ab4dd850ad69f5e07da" exitCode=0 Jan 29 12:15:01 crc kubenswrapper[4852]: I0129 12:15:01.619024 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" event={"ID":"10d09a8a-3459-4001-a749-d405816b1b52","Type":"ContainerDied","Data":"38d05d57b84e632f95a1aa9a5b62365dd85af5e7b6d56ab4dd850ad69f5e07da"} Jan 29 12:15:01 crc kubenswrapper[4852]: I0129 12:15:01.619242 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" event={"ID":"10d09a8a-3459-4001-a749-d405816b1b52","Type":"ContainerStarted","Data":"6163743dbf1983b55d5d24025640656a92b8a2dddd58d2ae4fa24cfe82fff64d"} Jan 29 12:15:02 crc kubenswrapper[4852]: I0129 12:15:02.965688 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.153807 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gg7fk\" (UniqueName: \"kubernetes.io/projected/10d09a8a-3459-4001-a749-d405816b1b52-kube-api-access-gg7fk\") pod \"10d09a8a-3459-4001-a749-d405816b1b52\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.154299 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10d09a8a-3459-4001-a749-d405816b1b52-secret-volume\") pod \"10d09a8a-3459-4001-a749-d405816b1b52\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.154457 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10d09a8a-3459-4001-a749-d405816b1b52-config-volume\") pod \"10d09a8a-3459-4001-a749-d405816b1b52\" (UID: \"10d09a8a-3459-4001-a749-d405816b1b52\") " Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.155380 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10d09a8a-3459-4001-a749-d405816b1b52-config-volume" (OuterVolumeSpecName: "config-volume") pod "10d09a8a-3459-4001-a749-d405816b1b52" (UID: "10d09a8a-3459-4001-a749-d405816b1b52"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.174934 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10d09a8a-3459-4001-a749-d405816b1b52-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "10d09a8a-3459-4001-a749-d405816b1b52" (UID: "10d09a8a-3459-4001-a749-d405816b1b52"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.175697 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10d09a8a-3459-4001-a749-d405816b1b52-kube-api-access-gg7fk" (OuterVolumeSpecName: "kube-api-access-gg7fk") pod "10d09a8a-3459-4001-a749-d405816b1b52" (UID: "10d09a8a-3459-4001-a749-d405816b1b52"). InnerVolumeSpecName "kube-api-access-gg7fk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.256940 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gg7fk\" (UniqueName: \"kubernetes.io/projected/10d09a8a-3459-4001-a749-d405816b1b52-kube-api-access-gg7fk\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.256987 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/10d09a8a-3459-4001-a749-d405816b1b52-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.257006 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/10d09a8a-3459-4001-a749-d405816b1b52-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.658358 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" event={"ID":"10d09a8a-3459-4001-a749-d405816b1b52","Type":"ContainerDied","Data":"6163743dbf1983b55d5d24025640656a92b8a2dddd58d2ae4fa24cfe82fff64d"} Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.658416 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6163743dbf1983b55d5d24025640656a92b8a2dddd58d2ae4fa24cfe82fff64d" Jan 29 12:15:03 crc kubenswrapper[4852]: I0129 12:15:03.658484 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn" Jan 29 12:15:04 crc kubenswrapper[4852]: I0129 12:15:04.045105 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb"] Jan 29 12:15:04 crc kubenswrapper[4852]: I0129 12:15:04.061752 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494770-rgvbb"] Jan 29 12:15:05 crc kubenswrapper[4852]: I0129 12:15:05.480170 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a0fd4c7-2f4b-482d-8ce6-652785813e37" path="/var/lib/kubelet/pods/4a0fd4c7-2f4b-482d-8ce6-652785813e37/volumes" Jan 29 12:15:20 crc kubenswrapper[4852]: E0129 12:15:20.601744 4852 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.23:56428->38.102.83.23:36165: write tcp 38.102.83.23:56428->38.102.83.23:36165: write: connection reset by peer Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.032683 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-dq45p"] Jan 29 12:15:23 crc kubenswrapper[4852]: E0129 12:15:23.042136 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10d09a8a-3459-4001-a749-d405816b1b52" containerName="collect-profiles" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.042161 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="10d09a8a-3459-4001-a749-d405816b1b52" containerName="collect-profiles" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.042394 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="10d09a8a-3459-4001-a749-d405816b1b52" containerName="collect-profiles" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.043166 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.058536 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-dq45p"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.144665 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-x6fcd"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.145960 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.153573 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-x6fcd"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.159081 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d108b5f-e63a-4812-ae11-eaab4c51fba9-operator-scripts\") pod \"nova-cell0-db-create-x6fcd\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.168786 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4w6b\" (UniqueName: \"kubernetes.io/projected/0d108b5f-e63a-4812-ae11-eaab4c51fba9-kube-api-access-g4w6b\") pod \"nova-cell0-db-create-x6fcd\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.168917 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckgpx\" (UniqueName: \"kubernetes.io/projected/2c3ea17c-df9b-4eb0-8929-9008639d7c79-kube-api-access-ckgpx\") pod \"nova-api-db-create-dq45p\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.168943 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c3ea17c-df9b-4eb0-8929-9008639d7c79-operator-scripts\") pod \"nova-api-db-create-dq45p\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.269955 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d108b5f-e63a-4812-ae11-eaab4c51fba9-operator-scripts\") pod \"nova-cell0-db-create-x6fcd\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.270056 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4w6b\" (UniqueName: \"kubernetes.io/projected/0d108b5f-e63a-4812-ae11-eaab4c51fba9-kube-api-access-g4w6b\") pod \"nova-cell0-db-create-x6fcd\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.270108 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckgpx\" (UniqueName: \"kubernetes.io/projected/2c3ea17c-df9b-4eb0-8929-9008639d7c79-kube-api-access-ckgpx\") pod \"nova-api-db-create-dq45p\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " 
pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.270130 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c3ea17c-df9b-4eb0-8929-9008639d7c79-operator-scripts\") pod \"nova-api-db-create-dq45p\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.270846 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c3ea17c-df9b-4eb0-8929-9008639d7c79-operator-scripts\") pod \"nova-api-db-create-dq45p\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.271925 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d108b5f-e63a-4812-ae11-eaab4c51fba9-operator-scripts\") pod \"nova-cell0-db-create-x6fcd\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.292613 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckgpx\" (UniqueName: \"kubernetes.io/projected/2c3ea17c-df9b-4eb0-8929-9008639d7c79-kube-api-access-ckgpx\") pod \"nova-api-db-create-dq45p\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.302700 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4w6b\" (UniqueName: \"kubernetes.io/projected/0d108b5f-e63a-4812-ae11-eaab4c51fba9-kube-api-access-g4w6b\") pod \"nova-cell0-db-create-x6fcd\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.327210 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-5a72-account-create-update-ll6z9"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.328485 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.330912 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.344073 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-5a72-account-create-update-ll6z9"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.380461 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.381648 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr5ld\" (UniqueName: \"kubernetes.io/projected/fb22d7f4-2e3f-4128-acad-5bff9584ebff-kube-api-access-kr5ld\") pod \"nova-api-5a72-account-create-update-ll6z9\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.381770 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb22d7f4-2e3f-4128-acad-5bff9584ebff-operator-scripts\") pod \"nova-api-5a72-account-create-update-ll6z9\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.418767 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dvpbl"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.420100 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.429969 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dvpbl"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.483896 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr5ld\" (UniqueName: \"kubernetes.io/projected/fb22d7f4-2e3f-4128-acad-5bff9584ebff-kube-api-access-kr5ld\") pod \"nova-api-5a72-account-create-update-ll6z9\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.484199 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfxg9\" (UniqueName: \"kubernetes.io/projected/549d7cc5-e0ca-4266-b507-a970c408621f-kube-api-access-jfxg9\") pod \"nova-cell1-db-create-dvpbl\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.484489 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/549d7cc5-e0ca-4266-b507-a970c408621f-operator-scripts\") pod \"nova-cell1-db-create-dvpbl\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.484529 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb22d7f4-2e3f-4128-acad-5bff9584ebff-operator-scripts\") pod \"nova-api-5a72-account-create-update-ll6z9\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.485476 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb22d7f4-2e3f-4128-acad-5bff9584ebff-operator-scripts\") pod \"nova-api-5a72-account-create-update-ll6z9\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 
12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.505801 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.508918 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr5ld\" (UniqueName: \"kubernetes.io/projected/fb22d7f4-2e3f-4128-acad-5bff9584ebff-kube-api-access-kr5ld\") pod \"nova-api-5a72-account-create-update-ll6z9\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.551788 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-742c-account-create-update-chpcv"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.558228 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.560078 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.564162 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.587387 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/549d7cc5-e0ca-4266-b507-a970c408621f-operator-scripts\") pod \"nova-cell1-db-create-dvpbl\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.587621 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxlkv\" (UniqueName: \"kubernetes.io/projected/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-kube-api-access-lxlkv\") pod \"nova-cell0-742c-account-create-update-chpcv\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.587657 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfxg9\" (UniqueName: \"kubernetes.io/projected/549d7cc5-e0ca-4266-b507-a970c408621f-kube-api-access-jfxg9\") pod \"nova-cell1-db-create-dvpbl\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.587758 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-operator-scripts\") pod \"nova-cell0-742c-account-create-update-chpcv\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.588800 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/549d7cc5-e0ca-4266-b507-a970c408621f-operator-scripts\") pod \"nova-cell1-db-create-dvpbl\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.621358 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jfxg9\" (UniqueName: \"kubernetes.io/projected/549d7cc5-e0ca-4266-b507-a970c408621f-kube-api-access-jfxg9\") pod \"nova-cell1-db-create-dvpbl\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.626325 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-742c-account-create-update-chpcv"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.663664 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-fc77-account-create-update-9dmsd"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.664894 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.667916 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.676084 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-fc77-account-create-update-9dmsd"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.690187 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62acb4d3-27bc-466d-af1f-35d451699565-operator-scripts\") pod \"nova-cell1-fc77-account-create-update-9dmsd\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.690497 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxlkv\" (UniqueName: \"kubernetes.io/projected/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-kube-api-access-lxlkv\") pod \"nova-cell0-742c-account-create-update-chpcv\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.690658 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dth7\" (UniqueName: \"kubernetes.io/projected/62acb4d3-27bc-466d-af1f-35d451699565-kube-api-access-8dth7\") pod \"nova-cell1-fc77-account-create-update-9dmsd\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.690730 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-operator-scripts\") pod \"nova-cell0-742c-account-create-update-chpcv\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.691531 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-operator-scripts\") pod \"nova-cell0-742c-account-create-update-chpcv\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.714747 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxlkv\" (UniqueName: 
\"kubernetes.io/projected/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-kube-api-access-lxlkv\") pod \"nova-cell0-742c-account-create-update-chpcv\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.792254 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dth7\" (UniqueName: \"kubernetes.io/projected/62acb4d3-27bc-466d-af1f-35d451699565-kube-api-access-8dth7\") pod \"nova-cell1-fc77-account-create-update-9dmsd\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.792341 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62acb4d3-27bc-466d-af1f-35d451699565-operator-scripts\") pod \"nova-cell1-fc77-account-create-update-9dmsd\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.793226 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62acb4d3-27bc-466d-af1f-35d451699565-operator-scripts\") pod \"nova-cell1-fc77-account-create-update-9dmsd\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.809528 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dth7\" (UniqueName: \"kubernetes.io/projected/62acb4d3-27bc-466d-af1f-35d451699565-kube-api-access-8dth7\") pod \"nova-cell1-fc77-account-create-update-9dmsd\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.885740 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.897215 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-dq45p"] Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.901406 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:23 crc kubenswrapper[4852]: W0129 12:15:23.906635 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c3ea17c_df9b_4eb0_8929_9008639d7c79.slice/crio-4c1a66420fb802fcec081b650997c0a6ac0e604b3988af7dc47da467416b867d WatchSource:0}: Error finding container 4c1a66420fb802fcec081b650997c0a6ac0e604b3988af7dc47da467416b867d: Status 404 returned error can't find the container with id 4c1a66420fb802fcec081b650997c0a6ac0e604b3988af7dc47da467416b867d Jan 29 12:15:23 crc kubenswrapper[4852]: I0129 12:15:23.997320 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.070539 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-x6fcd"] Jan 29 12:15:24 crc kubenswrapper[4852]: W0129 12:15:24.081345 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d108b5f_e63a_4812_ae11_eaab4c51fba9.slice/crio-bc3e043e75f7f1fb23c50d49728e1c8ec8848b9e2be97553574e157c4652f0c6 WatchSource:0}: Error finding container bc3e043e75f7f1fb23c50d49728e1c8ec8848b9e2be97553574e157c4652f0c6: Status 404 returned error can't find the container with id bc3e043e75f7f1fb23c50d49728e1c8ec8848b9e2be97553574e157c4652f0c6 Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.132547 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-5a72-account-create-update-ll6z9"] Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.347755 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dvpbl"] Jan 29 12:15:24 crc kubenswrapper[4852]: W0129 12:15:24.353730 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod549d7cc5_e0ca_4266_b507_a970c408621f.slice/crio-9d80c43d45f7164d0e3a47a701befb07f4ec60491d743fa65460ffa827ce5e1e WatchSource:0}: Error finding container 9d80c43d45f7164d0e3a47a701befb07f4ec60491d743fa65460ffa827ce5e1e: Status 404 returned error can't find the container with id 9d80c43d45f7164d0e3a47a701befb07f4ec60491d743fa65460ffa827ce5e1e Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.481268 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-742c-account-create-update-chpcv"] Jan 29 12:15:24 crc kubenswrapper[4852]: W0129 12:15:24.487229 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d8f8ac_2c54_4c48_b3e4_c9dfc4bb4792.slice/crio-d1048c93f1efd321cdac2c9be7de0b98e9d923844b535507d890156da65e2b4d WatchSource:0}: Error finding container d1048c93f1efd321cdac2c9be7de0b98e9d923844b535507d890156da65e2b4d: Status 404 returned error can't find the container with id d1048c93f1efd321cdac2c9be7de0b98e9d923844b535507d890156da65e2b4d Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.553331 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-fc77-account-create-update-9dmsd"] Jan 29 12:15:24 crc kubenswrapper[4852]: W0129 12:15:24.562214 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62acb4d3_27bc_466d_af1f_35d451699565.slice/crio-aa9f931f6e13cf81d6c30a7df29f77c5dc14611281b9b93068e23ebe9ad8f4a5 WatchSource:0}: Error finding container aa9f931f6e13cf81d6c30a7df29f77c5dc14611281b9b93068e23ebe9ad8f4a5: Status 404 returned error can't find the container with id aa9f931f6e13cf81d6c30a7df29f77c5dc14611281b9b93068e23ebe9ad8f4a5 Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.856701 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" event={"ID":"62acb4d3-27bc-466d-af1f-35d451699565","Type":"ContainerStarted","Data":"aa9f931f6e13cf81d6c30a7df29f77c5dc14611281b9b93068e23ebe9ad8f4a5"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.859153 4852 generic.go:334] "Generic (PLEG): container 
finished" podID="fb22d7f4-2e3f-4128-acad-5bff9584ebff" containerID="f6f5ea1f9751a4325a9c6225c6c79ac765f8b59a1bf170e96463b8b0a1fe9b32" exitCode=0 Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.859249 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5a72-account-create-update-ll6z9" event={"ID":"fb22d7f4-2e3f-4128-acad-5bff9584ebff","Type":"ContainerDied","Data":"f6f5ea1f9751a4325a9c6225c6c79ac765f8b59a1bf170e96463b8b0a1fe9b32"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.859331 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5a72-account-create-update-ll6z9" event={"ID":"fb22d7f4-2e3f-4128-acad-5bff9584ebff","Type":"ContainerStarted","Data":"5700920a749a32bcdb881eb858ccc983d8d14bc36f47daed721842e1aa8bd971"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.861408 4852 generic.go:334] "Generic (PLEG): container finished" podID="0d108b5f-e63a-4812-ae11-eaab4c51fba9" containerID="a2fda325225b67306e9adb00fb63c930298a7d27afd6ec1153cc0a58c0845af4" exitCode=0 Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.861473 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-x6fcd" event={"ID":"0d108b5f-e63a-4812-ae11-eaab4c51fba9","Type":"ContainerDied","Data":"a2fda325225b67306e9adb00fb63c930298a7d27afd6ec1153cc0a58c0845af4"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.861498 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-x6fcd" event={"ID":"0d108b5f-e63a-4812-ae11-eaab4c51fba9","Type":"ContainerStarted","Data":"bc3e043e75f7f1fb23c50d49728e1c8ec8848b9e2be97553574e157c4652f0c6"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.864432 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-742c-account-create-update-chpcv" event={"ID":"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792","Type":"ContainerStarted","Data":"d1048c93f1efd321cdac2c9be7de0b98e9d923844b535507d890156da65e2b4d"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.866060 4852 generic.go:334] "Generic (PLEG): container finished" podID="549d7cc5-e0ca-4266-b507-a970c408621f" containerID="c7ec2c2a09b399f9cadeae8b5a7035024cf020020cf6af995fdf1864715ac53d" exitCode=0 Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.866494 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dvpbl" event={"ID":"549d7cc5-e0ca-4266-b507-a970c408621f","Type":"ContainerDied","Data":"c7ec2c2a09b399f9cadeae8b5a7035024cf020020cf6af995fdf1864715ac53d"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.866551 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dvpbl" event={"ID":"549d7cc5-e0ca-4266-b507-a970c408621f","Type":"ContainerStarted","Data":"9d80c43d45f7164d0e3a47a701befb07f4ec60491d743fa65460ffa827ce5e1e"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.870641 4852 generic.go:334] "Generic (PLEG): container finished" podID="2c3ea17c-df9b-4eb0-8929-9008639d7c79" containerID="222d44b72ab581c36d25f18b1e87ee20d213b40a99b2f0c79580974d9d54fcc7" exitCode=0 Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.870689 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dq45p" event={"ID":"2c3ea17c-df9b-4eb0-8929-9008639d7c79","Type":"ContainerDied","Data":"222d44b72ab581c36d25f18b1e87ee20d213b40a99b2f0c79580974d9d54fcc7"} Jan 29 12:15:24 crc kubenswrapper[4852]: I0129 12:15:24.870714 4852 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-api-db-create-dq45p" event={"ID":"2c3ea17c-df9b-4eb0-8929-9008639d7c79","Type":"ContainerStarted","Data":"4c1a66420fb802fcec081b650997c0a6ac0e604b3988af7dc47da467416b867d"} Jan 29 12:15:25 crc kubenswrapper[4852]: I0129 12:15:25.884183 4852 generic.go:334] "Generic (PLEG): container finished" podID="37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" containerID="542047c4ada8f43f614a05e9be497edca18f831d1275ce9280591ff508071350" exitCode=0 Jan 29 12:15:25 crc kubenswrapper[4852]: I0129 12:15:25.884566 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-742c-account-create-update-chpcv" event={"ID":"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792","Type":"ContainerDied","Data":"542047c4ada8f43f614a05e9be497edca18f831d1275ce9280591ff508071350"} Jan 29 12:15:25 crc kubenswrapper[4852]: I0129 12:15:25.888849 4852 generic.go:334] "Generic (PLEG): container finished" podID="62acb4d3-27bc-466d-af1f-35d451699565" containerID="9dd4d775f342b430f2ec9a1b3e48f1e433f665ccb9498108d98cb67709598515" exitCode=0 Jan 29 12:15:25 crc kubenswrapper[4852]: I0129 12:15:25.889049 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" event={"ID":"62acb4d3-27bc-466d-af1f-35d451699565","Type":"ContainerDied","Data":"9dd4d775f342b430f2ec9a1b3e48f1e433f665ccb9498108d98cb67709598515"} Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.299029 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.427659 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.435429 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.444816 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d108b5f-e63a-4812-ae11-eaab4c51fba9-operator-scripts\") pod \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.445087 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4w6b\" (UniqueName: \"kubernetes.io/projected/0d108b5f-e63a-4812-ae11-eaab4c51fba9-kube-api-access-g4w6b\") pod \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\" (UID: \"0d108b5f-e63a-4812-ae11-eaab4c51fba9\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.445768 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d108b5f-e63a-4812-ae11-eaab4c51fba9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0d108b5f-e63a-4812-ae11-eaab4c51fba9" (UID: "0d108b5f-e63a-4812-ae11-eaab4c51fba9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.446099 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d108b5f-e63a-4812-ae11-eaab4c51fba9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.446264 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.453626 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d108b5f-e63a-4812-ae11-eaab4c51fba9-kube-api-access-g4w6b" (OuterVolumeSpecName: "kube-api-access-g4w6b") pod "0d108b5f-e63a-4812-ae11-eaab4c51fba9" (UID: "0d108b5f-e63a-4812-ae11-eaab4c51fba9"). InnerVolumeSpecName "kube-api-access-g4w6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.547537 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb22d7f4-2e3f-4128-acad-5bff9584ebff-operator-scripts\") pod \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.547641 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kr5ld\" (UniqueName: \"kubernetes.io/projected/fb22d7f4-2e3f-4128-acad-5bff9584ebff-kube-api-access-kr5ld\") pod \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\" (UID: \"fb22d7f4-2e3f-4128-acad-5bff9584ebff\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.547710 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c3ea17c-df9b-4eb0-8929-9008639d7c79-operator-scripts\") pod \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.547899 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfxg9\" (UniqueName: \"kubernetes.io/projected/549d7cc5-e0ca-4266-b507-a970c408621f-kube-api-access-jfxg9\") pod \"549d7cc5-e0ca-4266-b507-a970c408621f\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.547965 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckgpx\" (UniqueName: \"kubernetes.io/projected/2c3ea17c-df9b-4eb0-8929-9008639d7c79-kube-api-access-ckgpx\") pod \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\" (UID: \"2c3ea17c-df9b-4eb0-8929-9008639d7c79\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.547988 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/549d7cc5-e0ca-4266-b507-a970c408621f-operator-scripts\") pod \"549d7cc5-e0ca-4266-b507-a970c408621f\" (UID: \"549d7cc5-e0ca-4266-b507-a970c408621f\") " Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.548449 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4w6b\" (UniqueName: \"kubernetes.io/projected/0d108b5f-e63a-4812-ae11-eaab4c51fba9-kube-api-access-g4w6b\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.548523 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb22d7f4-2e3f-4128-acad-5bff9584ebff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fb22d7f4-2e3f-4128-acad-5bff9584ebff" (UID: "fb22d7f4-2e3f-4128-acad-5bff9584ebff"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.548524 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c3ea17c-df9b-4eb0-8929-9008639d7c79-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2c3ea17c-df9b-4eb0-8929-9008639d7c79" (UID: "2c3ea17c-df9b-4eb0-8929-9008639d7c79"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.548638 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/549d7cc5-e0ca-4266-b507-a970c408621f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "549d7cc5-e0ca-4266-b507-a970c408621f" (UID: "549d7cc5-e0ca-4266-b507-a970c408621f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.554617 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c3ea17c-df9b-4eb0-8929-9008639d7c79-kube-api-access-ckgpx" (OuterVolumeSpecName: "kube-api-access-ckgpx") pod "2c3ea17c-df9b-4eb0-8929-9008639d7c79" (UID: "2c3ea17c-df9b-4eb0-8929-9008639d7c79"). InnerVolumeSpecName "kube-api-access-ckgpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.555752 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/549d7cc5-e0ca-4266-b507-a970c408621f-kube-api-access-jfxg9" (OuterVolumeSpecName: "kube-api-access-jfxg9") pod "549d7cc5-e0ca-4266-b507-a970c408621f" (UID: "549d7cc5-e0ca-4266-b507-a970c408621f"). InnerVolumeSpecName "kube-api-access-jfxg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.566075 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb22d7f4-2e3f-4128-acad-5bff9584ebff-kube-api-access-kr5ld" (OuterVolumeSpecName: "kube-api-access-kr5ld") pod "fb22d7f4-2e3f-4128-acad-5bff9584ebff" (UID: "fb22d7f4-2e3f-4128-acad-5bff9584ebff"). InnerVolumeSpecName "kube-api-access-kr5ld". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.651029 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb22d7f4-2e3f-4128-acad-5bff9584ebff-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.651065 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kr5ld\" (UniqueName: \"kubernetes.io/projected/fb22d7f4-2e3f-4128-acad-5bff9584ebff-kube-api-access-kr5ld\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.651075 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c3ea17c-df9b-4eb0-8929-9008639d7c79-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.651085 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfxg9\" (UniqueName: \"kubernetes.io/projected/549d7cc5-e0ca-4266-b507-a970c408621f-kube-api-access-jfxg9\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.651093 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckgpx\" (UniqueName: \"kubernetes.io/projected/2c3ea17c-df9b-4eb0-8929-9008639d7c79-kube-api-access-ckgpx\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.651102 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/549d7cc5-e0ca-4266-b507-a970c408621f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.898668 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5a72-account-create-update-ll6z9" event={"ID":"fb22d7f4-2e3f-4128-acad-5bff9584ebff","Type":"ContainerDied","Data":"5700920a749a32bcdb881eb858ccc983d8d14bc36f47daed721842e1aa8bd971"} Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.898704 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5700920a749a32bcdb881eb858ccc983d8d14bc36f47daed721842e1aa8bd971" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.898723 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5a72-account-create-update-ll6z9" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.901327 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6fcd" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.901293 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-x6fcd" event={"ID":"0d108b5f-e63a-4812-ae11-eaab4c51fba9","Type":"ContainerDied","Data":"bc3e043e75f7f1fb23c50d49728e1c8ec8848b9e2be97553574e157c4652f0c6"} Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.901719 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc3e043e75f7f1fb23c50d49728e1c8ec8848b9e2be97553574e157c4652f0c6" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.903214 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dvpbl" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.903220 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dvpbl" event={"ID":"549d7cc5-e0ca-4266-b507-a970c408621f","Type":"ContainerDied","Data":"9d80c43d45f7164d0e3a47a701befb07f4ec60491d743fa65460ffa827ce5e1e"} Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.903362 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d80c43d45f7164d0e3a47a701befb07f4ec60491d743fa65460ffa827ce5e1e" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.904875 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dq45p" Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.909338 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dq45p" event={"ID":"2c3ea17c-df9b-4eb0-8929-9008639d7c79","Type":"ContainerDied","Data":"4c1a66420fb802fcec081b650997c0a6ac0e604b3988af7dc47da467416b867d"} Jan 29 12:15:26 crc kubenswrapper[4852]: I0129 12:15:26.909396 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c1a66420fb802fcec081b650997c0a6ac0e604b3988af7dc47da467416b867d" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.280668 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.326011 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.465470 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dth7\" (UniqueName: \"kubernetes.io/projected/62acb4d3-27bc-466d-af1f-35d451699565-kube-api-access-8dth7\") pod \"62acb4d3-27bc-466d-af1f-35d451699565\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.465652 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-operator-scripts\") pod \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.465759 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxlkv\" (UniqueName: \"kubernetes.io/projected/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-kube-api-access-lxlkv\") pod \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\" (UID: \"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792\") " Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.465798 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62acb4d3-27bc-466d-af1f-35d451699565-operator-scripts\") pod \"62acb4d3-27bc-466d-af1f-35d451699565\" (UID: \"62acb4d3-27bc-466d-af1f-35d451699565\") " Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.466254 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" (UID: "37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.467199 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62acb4d3-27bc-466d-af1f-35d451699565-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "62acb4d3-27bc-466d-af1f-35d451699565" (UID: "62acb4d3-27bc-466d-af1f-35d451699565"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.471714 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-kube-api-access-lxlkv" (OuterVolumeSpecName: "kube-api-access-lxlkv") pod "37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" (UID: "37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792"). InnerVolumeSpecName "kube-api-access-lxlkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.472056 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62acb4d3-27bc-466d-af1f-35d451699565-kube-api-access-8dth7" (OuterVolumeSpecName: "kube-api-access-8dth7") pod "62acb4d3-27bc-466d-af1f-35d451699565" (UID: "62acb4d3-27bc-466d-af1f-35d451699565"). InnerVolumeSpecName "kube-api-access-8dth7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.568243 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dth7\" (UniqueName: \"kubernetes.io/projected/62acb4d3-27bc-466d-af1f-35d451699565-kube-api-access-8dth7\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.568519 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.568530 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxlkv\" (UniqueName: \"kubernetes.io/projected/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792-kube-api-access-lxlkv\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.568538 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62acb4d3-27bc-466d-af1f-35d451699565-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.914543 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-742c-account-create-update-chpcv" event={"ID":"37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792","Type":"ContainerDied","Data":"d1048c93f1efd321cdac2c9be7de0b98e9d923844b535507d890156da65e2b4d"} Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.914572 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-742c-account-create-update-chpcv" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.914595 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1048c93f1efd321cdac2c9be7de0b98e9d923844b535507d890156da65e2b4d" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.916325 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.916317 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-fc77-account-create-update-9dmsd" event={"ID":"62acb4d3-27bc-466d-af1f-35d451699565","Type":"ContainerDied","Data":"aa9f931f6e13cf81d6c30a7df29f77c5dc14611281b9b93068e23ebe9ad8f4a5"} Jan 29 12:15:27 crc kubenswrapper[4852]: I0129 12:15:27.916460 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa9f931f6e13cf81d6c30a7df29f77c5dc14611281b9b93068e23ebe9ad8f4a5" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.913517 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qlz62"] Jan 29 12:15:28 crc kubenswrapper[4852]: E0129 12:15:28.913979 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62acb4d3-27bc-466d-af1f-35d451699565" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914001 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="62acb4d3-27bc-466d-af1f-35d451699565" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: E0129 12:15:28.914023 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="549d7cc5-e0ca-4266-b507-a970c408621f" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914043 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="549d7cc5-e0ca-4266-b507-a970c408621f" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: E0129 12:15:28.914059 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb22d7f4-2e3f-4128-acad-5bff9584ebff" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914068 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb22d7f4-2e3f-4128-acad-5bff9584ebff" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: E0129 12:15:28.914084 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d108b5f-e63a-4812-ae11-eaab4c51fba9" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914091 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d108b5f-e63a-4812-ae11-eaab4c51fba9" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: E0129 12:15:28.914113 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914121 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: E0129 12:15:28.914134 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3ea17c-df9b-4eb0-8929-9008639d7c79" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914142 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3ea17c-df9b-4eb0-8929-9008639d7c79" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914370 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb22d7f4-2e3f-4128-acad-5bff9584ebff" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc 
kubenswrapper[4852]: I0129 12:15:28.914393 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3ea17c-df9b-4eb0-8929-9008639d7c79" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914407 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914423 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="62acb4d3-27bc-466d-af1f-35d451699565" containerName="mariadb-account-create-update" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914438 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d108b5f-e63a-4812-ae11-eaab4c51fba9" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.914445 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="549d7cc5-e0ca-4266-b507-a970c408621f" containerName="mariadb-database-create" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.915968 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.918369 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hk9sl" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.918606 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.918874 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 29 12:15:28 crc kubenswrapper[4852]: I0129 12:15:28.960811 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qlz62"] Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.091199 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-scripts\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.091315 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tbpn\" (UniqueName: \"kubernetes.io/projected/3a883122-db7e-4602-b475-b2763b8e9e41-kube-api-access-4tbpn\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.091482 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.091527 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-config-data\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " 
pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.194067 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-scripts\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.194209 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tbpn\" (UniqueName: \"kubernetes.io/projected/3a883122-db7e-4602-b475-b2763b8e9e41-kube-api-access-4tbpn\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.194267 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-config-data\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.194291 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.201010 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.205380 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-config-data\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.208105 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-scripts\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.212533 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tbpn\" (UniqueName: \"kubernetes.io/projected/3a883122-db7e-4602-b475-b2763b8e9e41-kube-api-access-4tbpn\") pod \"nova-cell0-conductor-db-sync-qlz62\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.244328 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.549494 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qlz62"] Jan 29 12:15:29 crc kubenswrapper[4852]: W0129 12:15:29.554193 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a883122_db7e_4602_b475_b2763b8e9e41.slice/crio-cd595e5db1976a70c6dc2a7dff9afb0d4094ac93b2409c7c99af3db8011e6cbc WatchSource:0}: Error finding container cd595e5db1976a70c6dc2a7dff9afb0d4094ac93b2409c7c99af3db8011e6cbc: Status 404 returned error can't find the container with id cd595e5db1976a70c6dc2a7dff9afb0d4094ac93b2409c7c99af3db8011e6cbc Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.950744 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qlz62" event={"ID":"3a883122-db7e-4602-b475-b2763b8e9e41","Type":"ContainerStarted","Data":"cbdf9ac17b2093fe58616ffade16c7c0346d3035d957e0515dd48f990d67c315"} Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.950789 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qlz62" event={"ID":"3a883122-db7e-4602-b475-b2763b8e9e41","Type":"ContainerStarted","Data":"cd595e5db1976a70c6dc2a7dff9afb0d4094ac93b2409c7c99af3db8011e6cbc"} Jan 29 12:15:29 crc kubenswrapper[4852]: I0129 12:15:29.965071 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-qlz62" podStartSLOduration=1.965051482 podStartE2EDuration="1.965051482s" podCreationTimestamp="2026-01-29 12:15:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:29.964738094 +0000 UTC m=+5627.182069268" watchObservedRunningTime="2026-01-29 12:15:29.965051482 +0000 UTC m=+5627.182382626" Jan 29 12:15:30 crc kubenswrapper[4852]: I0129 12:15:30.017186 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:15:30 crc kubenswrapper[4852]: I0129 12:15:30.017262 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:15:36 crc kubenswrapper[4852]: I0129 12:15:36.012621 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qlz62" event={"ID":"3a883122-db7e-4602-b475-b2763b8e9e41","Type":"ContainerDied","Data":"cbdf9ac17b2093fe58616ffade16c7c0346d3035d957e0515dd48f990d67c315"} Jan 29 12:15:36 crc kubenswrapper[4852]: I0129 12:15:36.013954 4852 generic.go:334] "Generic (PLEG): container finished" podID="3a883122-db7e-4602-b475-b2763b8e9e41" containerID="cbdf9ac17b2093fe58616ffade16c7c0346d3035d957e0515dd48f990d67c315" exitCode=0 Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.325293 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.456863 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-config-data\") pod \"3a883122-db7e-4602-b475-b2763b8e9e41\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.456954 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-scripts\") pod \"3a883122-db7e-4602-b475-b2763b8e9e41\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.457000 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-combined-ca-bundle\") pod \"3a883122-db7e-4602-b475-b2763b8e9e41\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.457045 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tbpn\" (UniqueName: \"kubernetes.io/projected/3a883122-db7e-4602-b475-b2763b8e9e41-kube-api-access-4tbpn\") pod \"3a883122-db7e-4602-b475-b2763b8e9e41\" (UID: \"3a883122-db7e-4602-b475-b2763b8e9e41\") " Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.462700 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a883122-db7e-4602-b475-b2763b8e9e41-kube-api-access-4tbpn" (OuterVolumeSpecName: "kube-api-access-4tbpn") pod "3a883122-db7e-4602-b475-b2763b8e9e41" (UID: "3a883122-db7e-4602-b475-b2763b8e9e41"). InnerVolumeSpecName "kube-api-access-4tbpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.462836 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-scripts" (OuterVolumeSpecName: "scripts") pod "3a883122-db7e-4602-b475-b2763b8e9e41" (UID: "3a883122-db7e-4602-b475-b2763b8e9e41"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.488019 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a883122-db7e-4602-b475-b2763b8e9e41" (UID: "3a883122-db7e-4602-b475-b2763b8e9e41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.494264 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-config-data" (OuterVolumeSpecName: "config-data") pod "3a883122-db7e-4602-b475-b2763b8e9e41" (UID: "3a883122-db7e-4602-b475-b2763b8e9e41"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.558743 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.558771 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.558783 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tbpn\" (UniqueName: \"kubernetes.io/projected/3a883122-db7e-4602-b475-b2763b8e9e41-kube-api-access-4tbpn\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:37 crc kubenswrapper[4852]: I0129 12:15:37.558794 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a883122-db7e-4602-b475-b2763b8e9e41-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.038969 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qlz62" event={"ID":"3a883122-db7e-4602-b475-b2763b8e9e41","Type":"ContainerDied","Data":"cd595e5db1976a70c6dc2a7dff9afb0d4094ac93b2409c7c99af3db8011e6cbc"} Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.039286 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd595e5db1976a70c6dc2a7dff9afb0d4094ac93b2409c7c99af3db8011e6cbc" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.039299 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qlz62" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.141512 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:15:38 crc kubenswrapper[4852]: E0129 12:15:38.142023 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a883122-db7e-4602-b475-b2763b8e9e41" containerName="nova-cell0-conductor-db-sync" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.142046 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a883122-db7e-4602-b475-b2763b8e9e41" containerName="nova-cell0-conductor-db-sync" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.142239 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a883122-db7e-4602-b475-b2763b8e9e41" containerName="nova-cell0-conductor-db-sync" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.143067 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.145488 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hk9sl" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.145834 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.151541 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.271268 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.271507 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.271576 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njf9q\" (UniqueName: \"kubernetes.io/projected/8634fab6-800d-4f0a-8c2f-4b22b89048ae-kube-api-access-njf9q\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.373877 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.374069 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njf9q\" (UniqueName: \"kubernetes.io/projected/8634fab6-800d-4f0a-8c2f-4b22b89048ae-kube-api-access-njf9q\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.374179 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.379641 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.380400 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.393065 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njf9q\" (UniqueName: \"kubernetes.io/projected/8634fab6-800d-4f0a-8c2f-4b22b89048ae-kube-api-access-njf9q\") pod \"nova-cell0-conductor-0\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.489362 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:38 crc kubenswrapper[4852]: I0129 12:15:38.966900 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:15:39 crc kubenswrapper[4852]: I0129 12:15:39.047521 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8634fab6-800d-4f0a-8c2f-4b22b89048ae","Type":"ContainerStarted","Data":"7779d743bca3a3e442dc94747bf4542fc7e7e7416909708c0ab646e5aa5d3009"} Jan 29 12:15:40 crc kubenswrapper[4852]: I0129 12:15:40.059791 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8634fab6-800d-4f0a-8c2f-4b22b89048ae","Type":"ContainerStarted","Data":"a40bffd2289531afe6b19a82ce979b4101f83c743a5faab56ca0481408e45396"} Jan 29 12:15:40 crc kubenswrapper[4852]: I0129 12:15:40.060231 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:40 crc kubenswrapper[4852]: I0129 12:15:40.083462 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.083439006 podStartE2EDuration="2.083439006s" podCreationTimestamp="2026-01-29 12:15:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:40.0766242 +0000 UTC m=+5637.293955344" watchObservedRunningTime="2026-01-29 12:15:40.083439006 +0000 UTC m=+5637.300770150" Jan 29 12:15:48 crc kubenswrapper[4852]: I0129 12:15:48.520497 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 12:15:48 crc kubenswrapper[4852]: I0129 12:15:48.946872 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-rvw7r"] Jan 29 12:15:48 crc kubenswrapper[4852]: I0129 12:15:48.947952 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:48 crc kubenswrapper[4852]: I0129 12:15:48.951605 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 29 12:15:48 crc kubenswrapper[4852]: I0129 12:15:48.952378 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 29 12:15:48 crc kubenswrapper[4852]: I0129 12:15:48.966571 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-rvw7r"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.079048 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-config-data\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.079117 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbpxw\" (UniqueName: \"kubernetes.io/projected/3355180f-9d97-4440-aa5c-5319273300f7-kube-api-access-xbpxw\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.079165 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.079193 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-scripts\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.102172 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.103375 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.113957 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.135262 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.171864 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.179168 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.180929 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-scripts\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.181082 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-config-data\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.183199 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.183433 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbpxw\" (UniqueName: \"kubernetes.io/projected/3355180f-9d97-4440-aa5c-5319273300f7-kube-api-access-xbpxw\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.183606 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.203373 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-scripts\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.209578 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.211835 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-config-data\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.235981 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.240402 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbpxw\" (UniqueName: \"kubernetes.io/projected/3355180f-9d97-4440-aa5c-5319273300f7-kube-api-access-xbpxw\") pod \"nova-cell0-cell-mapping-rvw7r\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.250436 4852 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.251710 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.259553 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.259660 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.275973 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.293732 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-config-data\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.294189 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea4e12d2-11d9-4251-9588-34974b5f9989-logs\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.294259 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-config-data\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.294323 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gldrg\" (UniqueName: \"kubernetes.io/projected/ea4e12d2-11d9-4251-9588-34974b5f9989-kube-api-access-gldrg\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.294373 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5rzc\" (UniqueName: \"kubernetes.io/projected/650ab9c7-7090-42ef-bbe7-4684c333bdfe-kube-api-access-w5rzc\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.294391 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.294420 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.330240 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 
12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.331654 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.336734 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395708 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395763 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gldrg\" (UniqueName: \"kubernetes.io/projected/ea4e12d2-11d9-4251-9588-34974b5f9989-kube-api-access-gldrg\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395792 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395809 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5rzc\" (UniqueName: \"kubernetes.io/projected/650ab9c7-7090-42ef-bbe7-4684c333bdfe-kube-api-access-w5rzc\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395838 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395886 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-config-data\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395918 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395936 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea4e12d2-11d9-4251-9588-34974b5f9989-logs\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.395963 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmfx8\" (UniqueName: 
\"kubernetes.io/projected/add26b63-e3cc-42bc-9e83-5769bf7caa4c-kube-api-access-jmfx8\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.396012 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-config-data\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.402349 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-config-data\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.403014 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea4e12d2-11d9-4251-9588-34974b5f9989-logs\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.405042 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.410255 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-config-data\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.410823 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.418211 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.451753 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gldrg\" (UniqueName: \"kubernetes.io/projected/ea4e12d2-11d9-4251-9588-34974b5f9989-kube-api-access-gldrg\") pod \"nova-api-0\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.461106 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5rzc\" (UniqueName: \"kubernetes.io/projected/650ab9c7-7090-42ef-bbe7-4684c333bdfe-kube-api-access-w5rzc\") pod \"nova-scheduler-0\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.461977 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-775f85d44f-s8cqk"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.464539 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.495576 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-775f85d44f-s8cqk"] Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.500720 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-logs\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.500800 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.500859 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmfx8\" (UniqueName: \"kubernetes.io/projected/add26b63-e3cc-42bc-9e83-5769bf7caa4c-kube-api-access-jmfx8\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.500908 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhfbj\" (UniqueName: \"kubernetes.io/projected/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-kube-api-access-jhfbj\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.500975 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-config-data\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.501021 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.501068 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.554793 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmfx8\" (UniqueName: \"kubernetes.io/projected/add26b63-e3cc-42bc-9e83-5769bf7caa4c-kube-api-access-jmfx8\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.560983 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-config-data\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.583345 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.589459 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.655522 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhfbj\" (UniqueName: \"kubernetes.io/projected/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-kube-api-access-jhfbj\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.655627 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-config\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.656729 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-nb\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.656771 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtv76\" (UniqueName: \"kubernetes.io/projected/81e8dc7c-ee39-48d3-bb61-56822eddf64e-kube-api-access-qtv76\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.656823 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-config-data\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.656904 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-sb\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.656940 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.657021 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-dns-svc\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.657083 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-logs\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.659547 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-logs\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.669345 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-config-data\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.671476 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.685556 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhfbj\" (UniqueName: \"kubernetes.io/projected/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-kube-api-access-jhfbj\") pod \"nova-metadata-0\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.712520 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.729099 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.742815 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.742874 4852 scope.go:117] "RemoveContainer" containerID="baf1bc54c8dacac205c5821c18da0d26bfd351c4c0978a7464cf00035aca6487" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.758871 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-config\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.758922 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-nb\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.758980 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtv76\" (UniqueName: \"kubernetes.io/projected/81e8dc7c-ee39-48d3-bb61-56822eddf64e-kube-api-access-qtv76\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.759834 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-config\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.759868 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-nb\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.760074 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-sb\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.760171 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-dns-svc\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.761003 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-dns-svc\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.761380 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-sb\") pod 
\"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.788318 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtv76\" (UniqueName: \"kubernetes.io/projected/81e8dc7c-ee39-48d3-bb61-56822eddf64e-kube-api-access-qtv76\") pod \"dnsmasq-dns-775f85d44f-s8cqk\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.843711 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:49 crc kubenswrapper[4852]: I0129 12:15:49.884367 4852 scope.go:117] "RemoveContainer" containerID="bf0e68ca105d64c3d1b76652e2bcbb9cad83f2f2efa5b2d0b1a4abf35094d406" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.017411 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-rvw7r"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.162111 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rvw7r" event={"ID":"3355180f-9d97-4440-aa5c-5319273300f7","Type":"ContainerStarted","Data":"2d9501faf3dcb3a858524ac6dfa4febc03240b3278d8bf0387e4d4545b4ee486"} Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.194019 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.277822 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qtwww"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.279171 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.283536 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.284484 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.291428 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qtwww"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.382372 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-config-data\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.382452 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-scripts\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.382622 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.382948 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g2zc\" (UniqueName: \"kubernetes.io/projected/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-kube-api-access-8g2zc\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: W0129 12:15:50.388518 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadd26b63_e3cc_42bc_9e83_5769bf7caa4c.slice/crio-04ff5f9d04aa83b92f435f5775dcc804ebf69bccc48891041350a2a45af7d1ed WatchSource:0}: Error finding container 04ff5f9d04aa83b92f435f5775dcc804ebf69bccc48891041350a2a45af7d1ed: Status 404 returned error can't find the container with id 04ff5f9d04aa83b92f435f5775dcc804ebf69bccc48891041350a2a45af7d1ed Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.393884 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.420370 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.474259 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-775f85d44f-s8cqk"] Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.483754 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g2zc\" (UniqueName: 
\"kubernetes.io/projected/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-kube-api-access-8g2zc\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.483841 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-config-data\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.483860 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-scripts\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.483931 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.491063 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.494288 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-config-data\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.498132 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-scripts\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.502462 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g2zc\" (UniqueName: \"kubernetes.io/projected/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-kube-api-access-8g2zc\") pod \"nova-cell1-conductor-db-sync-qtwww\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.571648 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:50 crc kubenswrapper[4852]: W0129 12:15:50.584292 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55dd6817_4b60_42b9_a66c_e3a2cfb73f4d.slice/crio-1c985d01af6af7c1290a0b574a4296a23eba905c6cd804ced7c0d306049af1e0 WatchSource:0}: Error finding container 1c985d01af6af7c1290a0b574a4296a23eba905c6cd804ced7c0d306049af1e0: Status 
404 returned error can't find the container with id 1c985d01af6af7c1290a0b574a4296a23eba905c6cd804ced7c0d306049af1e0 Jan 29 12:15:50 crc kubenswrapper[4852]: I0129 12:15:50.682785 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.171969 4852 generic.go:334] "Generic (PLEG): container finished" podID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerID="cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1" exitCode=0 Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.172040 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" event={"ID":"81e8dc7c-ee39-48d3-bb61-56822eddf64e","Type":"ContainerDied","Data":"cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.172085 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" event={"ID":"81e8dc7c-ee39-48d3-bb61-56822eddf64e","Type":"ContainerStarted","Data":"41abbbb4918c40fa324a39d66d98f276e87e6381f0dc6d76ba73557584eb02df"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.176619 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rvw7r" event={"ID":"3355180f-9d97-4440-aa5c-5319273300f7","Type":"ContainerStarted","Data":"cccacb92aaa13713e9afaaf3151b67a846e605d3abdb81ff82a6f4098d722da6"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.180707 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d","Type":"ContainerStarted","Data":"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.180955 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d","Type":"ContainerStarted","Data":"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.181062 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d","Type":"ContainerStarted","Data":"1c985d01af6af7c1290a0b574a4296a23eba905c6cd804ced7c0d306049af1e0"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.187795 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"650ab9c7-7090-42ef-bbe7-4684c333bdfe","Type":"ContainerStarted","Data":"5b7731e49a8c8c0e1f73d4cb50bc8573667836fd1987525884c024eab817f0da"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.187857 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"650ab9c7-7090-42ef-bbe7-4684c333bdfe","Type":"ContainerStarted","Data":"669bd5a8599f87c868a0baa52eeac30d515144f97d2b60cfb1ab491e3bcb5da6"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.203092 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ea4e12d2-11d9-4251-9588-34974b5f9989","Type":"ContainerStarted","Data":"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.203325 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"ea4e12d2-11d9-4251-9588-34974b5f9989","Type":"ContainerStarted","Data":"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.203437 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ea4e12d2-11d9-4251-9588-34974b5f9989","Type":"ContainerStarted","Data":"04ba8f1cf2893e7f037d83b1a0eab44b66ac5d527512368ac0cd63e9ce53b82a"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.205769 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"add26b63-e3cc-42bc-9e83-5769bf7caa4c","Type":"ContainerStarted","Data":"a2b60bffa2e680b703b84804f33a875aa7c20dc160f72f720cc6cc42498264df"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.205943 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"add26b63-e3cc-42bc-9e83-5769bf7caa4c","Type":"ContainerStarted","Data":"04ff5f9d04aa83b92f435f5775dcc804ebf69bccc48891041350a2a45af7d1ed"} Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.224015 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-rvw7r" podStartSLOduration=3.223994854 podStartE2EDuration="3.223994854s" podCreationTimestamp="2026-01-29 12:15:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:51.215418844 +0000 UTC m=+5648.432749968" watchObservedRunningTime="2026-01-29 12:15:51.223994854 +0000 UTC m=+5648.441325978" Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.244378 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.24435243 podStartE2EDuration="2.24435243s" podCreationTimestamp="2026-01-29 12:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:51.235949535 +0000 UTC m=+5648.453280669" watchObservedRunningTime="2026-01-29 12:15:51.24435243 +0000 UTC m=+5648.461683564" Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.267676 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qtwww"] Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.282198 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.282173152 podStartE2EDuration="2.282173152s" podCreationTimestamp="2026-01-29 12:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:51.281516777 +0000 UTC m=+5648.498847911" watchObservedRunningTime="2026-01-29 12:15:51.282173152 +0000 UTC m=+5648.499504286" Jan 29 12:15:51 crc kubenswrapper[4852]: I0129 12:15:51.314959 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.314943071 podStartE2EDuration="2.314943071s" podCreationTimestamp="2026-01-29 12:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:51.310200486 +0000 UTC m=+5648.527531630" watchObservedRunningTime="2026-01-29 12:15:51.314943071 +0000 UTC m=+5648.532274205" Jan 29 12:15:52 crc kubenswrapper[4852]: I0129 12:15:52.230883 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qtwww" event={"ID":"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229","Type":"ContainerStarted","Data":"9c3c5e0d3579881f28eee44c35473d0870f02263cdc8b8b11aab1c48e7619d5e"} Jan 29 12:15:52 crc kubenswrapper[4852]: I0129 12:15:52.232899 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qtwww" event={"ID":"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229","Type":"ContainerStarted","Data":"e7ec7562ced90125c62d667012630d02e8cf90173347f9ccd8b44737aea64abc"} Jan 29 12:15:52 crc kubenswrapper[4852]: I0129 12:15:52.233078 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" event={"ID":"81e8dc7c-ee39-48d3-bb61-56822eddf64e","Type":"ContainerStarted","Data":"9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331"} Jan 29 12:15:52 crc kubenswrapper[4852]: I0129 12:15:52.287209 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-qtwww" podStartSLOduration=2.287067298 podStartE2EDuration="2.287067298s" podCreationTimestamp="2026-01-29 12:15:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:52.250291561 +0000 UTC m=+5649.467622715" watchObservedRunningTime="2026-01-29 12:15:52.287067298 +0000 UTC m=+5649.504398462" Jan 29 12:15:52 crc kubenswrapper[4852]: I0129 12:15:52.306441 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" podStartSLOduration=3.3064236 podStartE2EDuration="3.3064236s" podCreationTimestamp="2026-01-29 12:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:52.268193567 +0000 UTC m=+5649.485524701" watchObservedRunningTime="2026-01-29 12:15:52.3064236 +0000 UTC m=+5649.523754734" Jan 29 12:15:52 crc kubenswrapper[4852]: I0129 12:15:52.313471 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.313459922 podStartE2EDuration="3.313459922s" podCreationTimestamp="2026-01-29 12:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:52.293984227 +0000 UTC m=+5649.511315371" watchObservedRunningTime="2026-01-29 12:15:52.313459922 +0000 UTC m=+5649.530791056" Jan 29 12:15:53 crc kubenswrapper[4852]: I0129 12:15:53.240568 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:54 crc kubenswrapper[4852]: I0129 12:15:54.713302 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:54 crc kubenswrapper[4852]: I0129 12:15:54.730556 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 12:15:54 crc kubenswrapper[4852]: I0129 12:15:54.744226 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:15:54 crc kubenswrapper[4852]: I0129 12:15:54.744694 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:15:55 crc kubenswrapper[4852]: I0129 12:15:55.261413 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" containerID="9c3c5e0d3579881f28eee44c35473d0870f02263cdc8b8b11aab1c48e7619d5e" exitCode=0 Jan 29 12:15:55 crc kubenswrapper[4852]: I0129 12:15:55.261610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qtwww" event={"ID":"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229","Type":"ContainerDied","Data":"9c3c5e0d3579881f28eee44c35473d0870f02263cdc8b8b11aab1c48e7619d5e"} Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.282774 4852 generic.go:334] "Generic (PLEG): container finished" podID="3355180f-9d97-4440-aa5c-5319273300f7" containerID="cccacb92aaa13713e9afaaf3151b67a846e605d3abdb81ff82a6f4098d722da6" exitCode=0 Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.283044 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rvw7r" event={"ID":"3355180f-9d97-4440-aa5c-5319273300f7","Type":"ContainerDied","Data":"cccacb92aaa13713e9afaaf3151b67a846e605d3abdb81ff82a6f4098d722da6"} Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.624714 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.820934 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-config-data\") pod \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.821010 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-combined-ca-bundle\") pod \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.821081 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8g2zc\" (UniqueName: \"kubernetes.io/projected/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-kube-api-access-8g2zc\") pod \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.821234 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-scripts\") pod \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\" (UID: \"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229\") " Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.826553 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-kube-api-access-8g2zc" (OuterVolumeSpecName: "kube-api-access-8g2zc") pod "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" (UID: "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229"). InnerVolumeSpecName "kube-api-access-8g2zc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.834103 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-scripts" (OuterVolumeSpecName: "scripts") pod "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" (UID: "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.849797 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-config-data" (OuterVolumeSpecName: "config-data") pod "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" (UID: "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.852315 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" (UID: "19f8bd0d-d739-4d6e-a48a-e3cbb0f64229"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.929733 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.929768 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.929781 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:56 crc kubenswrapper[4852]: I0129 12:15:56.929794 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8g2zc\" (UniqueName: \"kubernetes.io/projected/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229-kube-api-access-8g2zc\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.300685 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qtwww" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.301958 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qtwww" event={"ID":"19f8bd0d-d739-4d6e-a48a-e3cbb0f64229","Type":"ContainerDied","Data":"e7ec7562ced90125c62d667012630d02e8cf90173347f9ccd8b44737aea64abc"} Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.302018 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7ec7562ced90125c62d667012630d02e8cf90173347f9ccd8b44737aea64abc" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.378343 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:15:57 crc kubenswrapper[4852]: E0129 12:15:57.378747 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" containerName="nova-cell1-conductor-db-sync" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.378765 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" containerName="nova-cell1-conductor-db-sync" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.378960 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" containerName="nova-cell1-conductor-db-sync" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.379561 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.387750 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.395730 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.546503 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.546905 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.546950 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bkft\" (UniqueName: \"kubernetes.io/projected/b8c5f61b-27a7-4756-996f-b58b641a2ebc-kube-api-access-2bkft\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.649121 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.649223 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.649258 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bkft\" (UniqueName: \"kubernetes.io/projected/b8c5f61b-27a7-4756-996f-b58b641a2ebc-kube-api-access-2bkft\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.653864 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.656950 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.676775 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bkft\" (UniqueName: \"kubernetes.io/projected/b8c5f61b-27a7-4756-996f-b58b641a2ebc-kube-api-access-2bkft\") pod \"nova-cell1-conductor-0\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.706253 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.818996 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.954484 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbpxw\" (UniqueName: \"kubernetes.io/projected/3355180f-9d97-4440-aa5c-5319273300f7-kube-api-access-xbpxw\") pod \"3355180f-9d97-4440-aa5c-5319273300f7\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.954539 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-config-data\") pod \"3355180f-9d97-4440-aa5c-5319273300f7\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.954666 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-combined-ca-bundle\") pod \"3355180f-9d97-4440-aa5c-5319273300f7\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.954690 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-scripts\") pod \"3355180f-9d97-4440-aa5c-5319273300f7\" (UID: \"3355180f-9d97-4440-aa5c-5319273300f7\") " Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.959216 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3355180f-9d97-4440-aa5c-5319273300f7-kube-api-access-xbpxw" (OuterVolumeSpecName: "kube-api-access-xbpxw") pod "3355180f-9d97-4440-aa5c-5319273300f7" (UID: "3355180f-9d97-4440-aa5c-5319273300f7"). InnerVolumeSpecName "kube-api-access-xbpxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.960203 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-scripts" (OuterVolumeSpecName: "scripts") pod "3355180f-9d97-4440-aa5c-5319273300f7" (UID: "3355180f-9d97-4440-aa5c-5319273300f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.978213 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3355180f-9d97-4440-aa5c-5319273300f7" (UID: "3355180f-9d97-4440-aa5c-5319273300f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:57 crc kubenswrapper[4852]: I0129 12:15:57.985624 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-config-data" (OuterVolumeSpecName: "config-data") pod "3355180f-9d97-4440-aa5c-5319273300f7" (UID: "3355180f-9d97-4440-aa5c-5319273300f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.056477 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbpxw\" (UniqueName: \"kubernetes.io/projected/3355180f-9d97-4440-aa5c-5319273300f7-kube-api-access-xbpxw\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.056519 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.056532 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.056543 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3355180f-9d97-4440-aa5c-5319273300f7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.188146 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.312813 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rvw7r" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.313736 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rvw7r" event={"ID":"3355180f-9d97-4440-aa5c-5319273300f7","Type":"ContainerDied","Data":"2d9501faf3dcb3a858524ac6dfa4febc03240b3278d8bf0387e4d4545b4ee486"} Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.313794 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d9501faf3dcb3a858524ac6dfa4febc03240b3278d8bf0387e4d4545b4ee486" Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.315257 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8c5f61b-27a7-4756-996f-b58b641a2ebc","Type":"ContainerStarted","Data":"9f75cc94b449505d26441d8e1ea3e3846db37769cdd558463290c7bfb9857edb"} Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.492332 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.492758 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-log" containerID="cri-o://c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8" gracePeriod=30 Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.493116 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-api" containerID="cri-o://39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532" gracePeriod=30 Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.520278 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.520509 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="650ab9c7-7090-42ef-bbe7-4684c333bdfe" containerName="nova-scheduler-scheduler" 
containerID="cri-o://5b7731e49a8c8c0e1f73d4cb50bc8573667836fd1987525884c024eab817f0da" gracePeriod=30 Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.597514 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.597850 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-log" containerID="cri-o://ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b" gracePeriod=30 Jan 29 12:15:58 crc kubenswrapper[4852]: I0129 12:15:58.597909 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-metadata" containerID="cri-o://f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504" gracePeriod=30 Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.143035 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.183575 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.281773 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-logs\") pod \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.281848 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-combined-ca-bundle\") pod \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.281912 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea4e12d2-11d9-4251-9588-34974b5f9989-logs\") pod \"ea4e12d2-11d9-4251-9588-34974b5f9989\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.281934 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-config-data\") pod \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282003 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gldrg\" (UniqueName: \"kubernetes.io/projected/ea4e12d2-11d9-4251-9588-34974b5f9989-kube-api-access-gldrg\") pod \"ea4e12d2-11d9-4251-9588-34974b5f9989\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282089 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-config-data\") pod \"ea4e12d2-11d9-4251-9588-34974b5f9989\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282105 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-logs" (OuterVolumeSpecName: "logs") pod "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" (UID: "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282159 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-combined-ca-bundle\") pod \"ea4e12d2-11d9-4251-9588-34974b5f9989\" (UID: \"ea4e12d2-11d9-4251-9588-34974b5f9989\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282187 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhfbj\" (UniqueName: \"kubernetes.io/projected/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-kube-api-access-jhfbj\") pod \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\" (UID: \"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d\") " Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282323 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea4e12d2-11d9-4251-9588-34974b5f9989-logs" (OuterVolumeSpecName: "logs") pod "ea4e12d2-11d9-4251-9588-34974b5f9989" (UID: "ea4e12d2-11d9-4251-9588-34974b5f9989"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282848 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.282872 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea4e12d2-11d9-4251-9588-34974b5f9989-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.292458 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea4e12d2-11d9-4251-9588-34974b5f9989-kube-api-access-gldrg" (OuterVolumeSpecName: "kube-api-access-gldrg") pod "ea4e12d2-11d9-4251-9588-34974b5f9989" (UID: "ea4e12d2-11d9-4251-9588-34974b5f9989"). InnerVolumeSpecName "kube-api-access-gldrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.292509 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-kube-api-access-jhfbj" (OuterVolumeSpecName: "kube-api-access-jhfbj") pod "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" (UID: "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d"). InnerVolumeSpecName "kube-api-access-jhfbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.305857 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-config-data" (OuterVolumeSpecName: "config-data") pod "ea4e12d2-11d9-4251-9588-34974b5f9989" (UID: "ea4e12d2-11d9-4251-9588-34974b5f9989"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.307427 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea4e12d2-11d9-4251-9588-34974b5f9989" (UID: "ea4e12d2-11d9-4251-9588-34974b5f9989"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.307979 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-config-data" (OuterVolumeSpecName: "config-data") pod "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" (UID: "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.314814 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" (UID: "55dd6817-4b60-42b9-a66c-e3a2cfb73f4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.334823 4852 generic.go:334] "Generic (PLEG): container finished" podID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerID="39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532" exitCode=0 Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.334860 4852 generic.go:334] "Generic (PLEG): container finished" podID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerID="c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8" exitCode=143 Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.334903 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ea4e12d2-11d9-4251-9588-34974b5f9989","Type":"ContainerDied","Data":"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.334932 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ea4e12d2-11d9-4251-9588-34974b5f9989","Type":"ContainerDied","Data":"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.334944 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ea4e12d2-11d9-4251-9588-34974b5f9989","Type":"ContainerDied","Data":"04ba8f1cf2893e7f037d83b1a0eab44b66ac5d527512368ac0cd63e9ce53b82a"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.334960 4852 scope.go:117] "RemoveContainer" containerID="39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.335088 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.344868 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8c5f61b-27a7-4756-996f-b58b641a2ebc","Type":"ContainerStarted","Data":"b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.345218 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.347014 4852 generic.go:334] "Generic (PLEG): container finished" podID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerID="f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504" exitCode=0 Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.347048 4852 generic.go:334] "Generic (PLEG): container finished" podID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerID="ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b" exitCode=143 Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.347070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d","Type":"ContainerDied","Data":"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.347094 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d","Type":"ContainerDied","Data":"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.347108 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"55dd6817-4b60-42b9-a66c-e3a2cfb73f4d","Type":"ContainerDied","Data":"1c985d01af6af7c1290a0b574a4296a23eba905c6cd804ced7c0d306049af1e0"} Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.347171 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.372869 4852 scope.go:117] "RemoveContainer" containerID="c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.387423 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.387467 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gldrg\" (UniqueName: \"kubernetes.io/projected/ea4e12d2-11d9-4251-9588-34974b5f9989-kube-api-access-gldrg\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.387487 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.387497 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea4e12d2-11d9-4251-9588-34974b5f9989-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.387506 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhfbj\" (UniqueName: \"kubernetes.io/projected/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-kube-api-access-jhfbj\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.387518 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.418485 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.418466402 podStartE2EDuration="2.418466402s" podCreationTimestamp="2026-01-29 12:15:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:59.359411802 +0000 UTC m=+5656.576742956" watchObservedRunningTime="2026-01-29 12:15:59.418466402 +0000 UTC m=+5656.635797536" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.429356 4852 scope.go:117] "RemoveContainer" containerID="39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.429803 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532\": container with ID starting with 39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532 not found: ID does not exist" containerID="39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.429844 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532"} err="failed to get container status \"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532\": rpc error: code = NotFound desc = could not find container \"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532\": container with 
ID starting with 39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532 not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.429880 4852 scope.go:117] "RemoveContainer" containerID="c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.430099 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8\": container with ID starting with c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8 not found: ID does not exist" containerID="c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.430122 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8"} err="failed to get container status \"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8\": rpc error: code = NotFound desc = could not find container \"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8\": container with ID starting with c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8 not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.430137 4852 scope.go:117] "RemoveContainer" containerID="39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.430311 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532"} err="failed to get container status \"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532\": rpc error: code = NotFound desc = could not find container \"39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532\": container with ID starting with 39e24cd2ba0176b6721507195b5b5600a3a34b50f4d02188ab087b306b5e2532 not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.430327 4852 scope.go:117] "RemoveContainer" containerID="c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.430531 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8"} err="failed to get container status \"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8\": rpc error: code = NotFound desc = could not find container \"c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8\": container with ID starting with c39670e1d2f45f9c2d0df9a45742a84de39cba3e70d078b153ff32c765791ed8 not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.430564 4852 scope.go:117] "RemoveContainer" containerID="f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.431766 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.441711 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.450427 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:59 crc 
kubenswrapper[4852]: I0129 12:15:59.458838 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.459364 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-log" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.459388 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-log" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.459421 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3355180f-9d97-4440-aa5c-5319273300f7" containerName="nova-manage" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.459431 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3355180f-9d97-4440-aa5c-5319273300f7" containerName="nova-manage" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.459446 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-api" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.459455 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-api" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.459480 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-metadata" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.459489 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-metadata" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.459504 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-log" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.459513 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-log" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.460438 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-log" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.460479 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3355180f-9d97-4440-aa5c-5319273300f7" containerName="nova-manage" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.460492 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-log" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.460511 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" containerName="nova-metadata-metadata" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.460524 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" containerName="nova-api-api" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.461852 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.463572 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.489314 4852 scope.go:117] "RemoveContainer" containerID="ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.491066 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea4e12d2-11d9-4251-9588-34974b5f9989" path="/var/lib/kubelet/pods/ea4e12d2-11d9-4251-9588-34974b5f9989/volumes" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.491831 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.494123 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.503668 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.505176 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.508177 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.509393 4852 scope.go:117] "RemoveContainer" containerID="f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.509777 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504\": container with ID starting with f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504 not found: ID does not exist" containerID="f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.509819 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504"} err="failed to get container status \"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504\": rpc error: code = NotFound desc = could not find container \"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504\": container with ID starting with f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504 not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.509844 4852 scope.go:117] "RemoveContainer" containerID="ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b" Jan 29 12:15:59 crc kubenswrapper[4852]: E0129 12:15:59.510084 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b\": container with ID starting with ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b not found: ID does not exist" containerID="ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.510110 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b"} 
err="failed to get container status \"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b\": rpc error: code = NotFound desc = could not find container \"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b\": container with ID starting with ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.510128 4852 scope.go:117] "RemoveContainer" containerID="f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.510377 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504"} err="failed to get container status \"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504\": rpc error: code = NotFound desc = could not find container \"f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504\": container with ID starting with f80a131bfda8e0c838a1d125c47ae26e6ee3cf35318ee7ed1a95e9078a0ca504 not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.510399 4852 scope.go:117] "RemoveContainer" containerID="ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.510690 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b"} err="failed to get container status \"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b\": rpc error: code = NotFound desc = could not find container \"ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b\": container with ID starting with ac4d500582d6a6b7e6c715f07fdedc626cda6193fa034b4d9abeaecadab43b9b not found: ID does not exist" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.511826 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.590747 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-config-data\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.590896 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-logs\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.591041 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.591141 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6be9f292-4ab6-497d-99ef-083e2735d362-logs\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc 
kubenswrapper[4852]: I0129 12:15:59.591217 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh5qq\" (UniqueName: \"kubernetes.io/projected/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-kube-api-access-sh5qq\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.591265 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.591292 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-config-data\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.591318 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lgdf\" (UniqueName: \"kubernetes.io/projected/6be9f292-4ab6-497d-99ef-083e2735d362-kube-api-access-6lgdf\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693542 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-config-data\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693604 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-logs\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693660 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693699 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6be9f292-4ab6-497d-99ef-083e2735d362-logs\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693728 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh5qq\" (UniqueName: \"kubernetes.io/projected/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-kube-api-access-sh5qq\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693746 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693763 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-config-data\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.693781 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lgdf\" (UniqueName: \"kubernetes.io/projected/6be9f292-4ab6-497d-99ef-083e2735d362-kube-api-access-6lgdf\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.694433 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-logs\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.695632 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6be9f292-4ab6-497d-99ef-083e2735d362-logs\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.700277 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.700739 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-config-data\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.701280 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-config-data\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.702393 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.716904 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.717084 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lgdf\" (UniqueName: \"kubernetes.io/projected/6be9f292-4ab6-497d-99ef-083e2735d362-kube-api-access-6lgdf\") pod \"nova-metadata-0\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.723466 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sh5qq\" (UniqueName: \"kubernetes.io/projected/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-kube-api-access-sh5qq\") pod \"nova-api-0\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.729963 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.799526 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.825830 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.846029 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.915090 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77c4d4b58c-2ghdr"] Jan 29 12:15:59 crc kubenswrapper[4852]: I0129 12:15:59.915338 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" podUID="f722387b-877f-46ff-9674-8bf258e77be7" containerName="dnsmasq-dns" containerID="cri-o://7600c84cdfbf0bb92cb25bd890e47f1d59cdce326e845d58dcf8b790fe52851b" gracePeriod=10 Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.016682 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.016897 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.364209 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.382242 4852 generic.go:334] "Generic (PLEG): container finished" podID="f722387b-877f-46ff-9674-8bf258e77be7" containerID="7600c84cdfbf0bb92cb25bd890e47f1d59cdce326e845d58dcf8b790fe52851b" exitCode=0 Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.382310 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" event={"ID":"f722387b-877f-46ff-9674-8bf258e77be7","Type":"ContainerDied","Data":"7600c84cdfbf0bb92cb25bd890e47f1d59cdce326e845d58dcf8b790fe52851b"} Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.410789 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.446524 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.535430 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:00 crc kubenswrapper[4852]: W0129 12:16:00.538988 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6be9f292_4ab6_497d_99ef_083e2735d362.slice/crio-c4e0b31a44050a255886664bea0febc6fff18be673e7ea974ec510fbf4524cb1 WatchSource:0}: Error finding container c4e0b31a44050a255886664bea0febc6fff18be673e7ea974ec510fbf4524cb1: Status 404 returned error can't find the container with id c4e0b31a44050a255886664bea0febc6fff18be673e7ea974ec510fbf4524cb1 Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.657170 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-config\") pod \"f722387b-877f-46ff-9674-8bf258e77be7\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.657270 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-nb\") pod \"f722387b-877f-46ff-9674-8bf258e77be7\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.657362 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsjrl\" (UniqueName: \"kubernetes.io/projected/f722387b-877f-46ff-9674-8bf258e77be7-kube-api-access-gsjrl\") pod \"f722387b-877f-46ff-9674-8bf258e77be7\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.657404 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-sb\") pod \"f722387b-877f-46ff-9674-8bf258e77be7\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.657569 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-dns-svc\") pod \"f722387b-877f-46ff-9674-8bf258e77be7\" (UID: \"f722387b-877f-46ff-9674-8bf258e77be7\") " Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.669295 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f722387b-877f-46ff-9674-8bf258e77be7-kube-api-access-gsjrl" (OuterVolumeSpecName: "kube-api-access-gsjrl") pod "f722387b-877f-46ff-9674-8bf258e77be7" (UID: "f722387b-877f-46ff-9674-8bf258e77be7"). InnerVolumeSpecName "kube-api-access-gsjrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.719537 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f722387b-877f-46ff-9674-8bf258e77be7" (UID: "f722387b-877f-46ff-9674-8bf258e77be7"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.720864 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f722387b-877f-46ff-9674-8bf258e77be7" (UID: "f722387b-877f-46ff-9674-8bf258e77be7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.733240 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-config" (OuterVolumeSpecName: "config") pod "f722387b-877f-46ff-9674-8bf258e77be7" (UID: "f722387b-877f-46ff-9674-8bf258e77be7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.738955 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f722387b-877f-46ff-9674-8bf258e77be7" (UID: "f722387b-877f-46ff-9674-8bf258e77be7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.760306 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.760529 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.760652 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsjrl\" (UniqueName: \"kubernetes.io/projected/f722387b-877f-46ff-9674-8bf258e77be7-kube-api-access-gsjrl\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.760713 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:00 crc kubenswrapper[4852]: I0129 12:16:00.760776 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f722387b-877f-46ff-9674-8bf258e77be7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.398964 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6be9f292-4ab6-497d-99ef-083e2735d362","Type":"ContainerStarted","Data":"3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.399296 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6be9f292-4ab6-497d-99ef-083e2735d362","Type":"ContainerStarted","Data":"4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.399310 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"6be9f292-4ab6-497d-99ef-083e2735d362","Type":"ContainerStarted","Data":"c4e0b31a44050a255886664bea0febc6fff18be673e7ea974ec510fbf4524cb1"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.401547 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f9fe96c8-183a-43fa-a219-d0bbe8ba5633","Type":"ContainerStarted","Data":"2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.401602 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f9fe96c8-183a-43fa-a219-d0bbe8ba5633","Type":"ContainerStarted","Data":"b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.401618 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f9fe96c8-183a-43fa-a219-d0bbe8ba5633","Type":"ContainerStarted","Data":"5909ffb775e4dcfa2f16a94d4876198a92b6f5912f6e36136158bd01d928fe0e"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.404496 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" event={"ID":"f722387b-877f-46ff-9674-8bf258e77be7","Type":"ContainerDied","Data":"314818ed3556dfe0e186cf0d8e192158821234b9bd87350ee5d9bcaf5769914b"} Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.404540 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77c4d4b58c-2ghdr" Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.404566 4852 scope.go:117] "RemoveContainer" containerID="7600c84cdfbf0bb92cb25bd890e47f1d59cdce326e845d58dcf8b790fe52851b" Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.431950 4852 scope.go:117] "RemoveContainer" containerID="c42b3be31da1746ffd5bdb7bd09052ca758af5d5c333bf0227cf280d8a951887" Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.434911 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.434884823 podStartE2EDuration="2.434884823s" podCreationTimestamp="2026-01-29 12:15:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:01.421030985 +0000 UTC m=+5658.638362199" watchObservedRunningTime="2026-01-29 12:16:01.434884823 +0000 UTC m=+5658.652215977" Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.474349 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55dd6817-4b60-42b9-a66c-e3a2cfb73f4d" path="/var/lib/kubelet/pods/55dd6817-4b60-42b9-a66c-e3a2cfb73f4d/volumes" Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.474941 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77c4d4b58c-2ghdr"] Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.479864 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77c4d4b58c-2ghdr"] Jan 29 12:16:01 crc kubenswrapper[4852]: I0129 12:16:01.490667 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.490640933 podStartE2EDuration="2.490640933s" podCreationTimestamp="2026-01-29 12:15:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:01.454890192 +0000 UTC m=+5658.672221326" watchObservedRunningTime="2026-01-29 
12:16:01.490640933 +0000 UTC m=+5658.707972077" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.420979 4852 generic.go:334] "Generic (PLEG): container finished" podID="650ab9c7-7090-42ef-bbe7-4684c333bdfe" containerID="5b7731e49a8c8c0e1f73d4cb50bc8573667836fd1987525884c024eab817f0da" exitCode=0 Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.421060 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"650ab9c7-7090-42ef-bbe7-4684c333bdfe","Type":"ContainerDied","Data":"5b7731e49a8c8c0e1f73d4cb50bc8573667836fd1987525884c024eab817f0da"} Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.740002 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.866254 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-config-data\") pod \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.866536 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-combined-ca-bundle\") pod \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.866630 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5rzc\" (UniqueName: \"kubernetes.io/projected/650ab9c7-7090-42ef-bbe7-4684c333bdfe-kube-api-access-w5rzc\") pod \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\" (UID: \"650ab9c7-7090-42ef-bbe7-4684c333bdfe\") " Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.874858 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/650ab9c7-7090-42ef-bbe7-4684c333bdfe-kube-api-access-w5rzc" (OuterVolumeSpecName: "kube-api-access-w5rzc") pod "650ab9c7-7090-42ef-bbe7-4684c333bdfe" (UID: "650ab9c7-7090-42ef-bbe7-4684c333bdfe"). InnerVolumeSpecName "kube-api-access-w5rzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.894116 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-config-data" (OuterVolumeSpecName: "config-data") pod "650ab9c7-7090-42ef-bbe7-4684c333bdfe" (UID: "650ab9c7-7090-42ef-bbe7-4684c333bdfe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.896321 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "650ab9c7-7090-42ef-bbe7-4684c333bdfe" (UID: "650ab9c7-7090-42ef-bbe7-4684c333bdfe"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.970942 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.971217 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/650ab9c7-7090-42ef-bbe7-4684c333bdfe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:02 crc kubenswrapper[4852]: I0129 12:16:02.971377 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5rzc\" (UniqueName: \"kubernetes.io/projected/650ab9c7-7090-42ef-bbe7-4684c333bdfe-kube-api-access-w5rzc\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.433278 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"650ab9c7-7090-42ef-bbe7-4684c333bdfe","Type":"ContainerDied","Data":"669bd5a8599f87c868a0baa52eeac30d515144f97d2b60cfb1ab491e3bcb5da6"} Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.433362 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.433375 4852 scope.go:117] "RemoveContainer" containerID="5b7731e49a8c8c0e1f73d4cb50bc8573667836fd1987525884c024eab817f0da" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.490415 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f722387b-877f-46ff-9674-8bf258e77be7" path="/var/lib/kubelet/pods/f722387b-877f-46ff-9674-8bf258e77be7/volumes" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.491834 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.491894 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.520439 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:03 crc kubenswrapper[4852]: E0129 12:16:03.521020 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f722387b-877f-46ff-9674-8bf258e77be7" containerName="dnsmasq-dns" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.521156 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f722387b-877f-46ff-9674-8bf258e77be7" containerName="dnsmasq-dns" Jan 29 12:16:03 crc kubenswrapper[4852]: E0129 12:16:03.521252 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f722387b-877f-46ff-9674-8bf258e77be7" containerName="init" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.521303 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f722387b-877f-46ff-9674-8bf258e77be7" containerName="init" Jan 29 12:16:03 crc kubenswrapper[4852]: E0129 12:16:03.521356 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650ab9c7-7090-42ef-bbe7-4684c333bdfe" containerName="nova-scheduler-scheduler" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.521428 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="650ab9c7-7090-42ef-bbe7-4684c333bdfe" containerName="nova-scheduler-scheduler" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.521645 4852 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="650ab9c7-7090-42ef-bbe7-4684c333bdfe" containerName="nova-scheduler-scheduler" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.521716 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f722387b-877f-46ff-9674-8bf258e77be7" containerName="dnsmasq-dns" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.522351 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.525309 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.534300 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.581489 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-config-data\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.581708 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.581739 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hs7nv\" (UniqueName: \"kubernetes.io/projected/082cf921-8f9d-42c7-9fc9-e72224ae6808-kube-api-access-hs7nv\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.684800 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.684900 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hs7nv\" (UniqueName: \"kubernetes.io/projected/082cf921-8f9d-42c7-9fc9-e72224ae6808-kube-api-access-hs7nv\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.685048 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-config-data\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.690447 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-config-data\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.690670 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.705939 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hs7nv\" (UniqueName: \"kubernetes.io/projected/082cf921-8f9d-42c7-9fc9-e72224ae6808-kube-api-access-hs7nv\") pod \"nova-scheduler-0\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:03 crc kubenswrapper[4852]: I0129 12:16:03.871255 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:04 crc kubenswrapper[4852]: I0129 12:16:04.349364 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:04 crc kubenswrapper[4852]: W0129 12:16:04.359654 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod082cf921_8f9d_42c7_9fc9_e72224ae6808.slice/crio-eb1d477c98dbc54ac747b29477379c07870adc9bce2002bb702ec47e02c31110 WatchSource:0}: Error finding container eb1d477c98dbc54ac747b29477379c07870adc9bce2002bb702ec47e02c31110: Status 404 returned error can't find the container with id eb1d477c98dbc54ac747b29477379c07870adc9bce2002bb702ec47e02c31110 Jan 29 12:16:04 crc kubenswrapper[4852]: I0129 12:16:04.448470 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"082cf921-8f9d-42c7-9fc9-e72224ae6808","Type":"ContainerStarted","Data":"eb1d477c98dbc54ac747b29477379c07870adc9bce2002bb702ec47e02c31110"} Jan 29 12:16:04 crc kubenswrapper[4852]: I0129 12:16:04.826620 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:16:04 crc kubenswrapper[4852]: I0129 12:16:04.826674 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:16:05 crc kubenswrapper[4852]: I0129 12:16:05.459456 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"082cf921-8f9d-42c7-9fc9-e72224ae6808","Type":"ContainerStarted","Data":"01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86"} Jan 29 12:16:05 crc kubenswrapper[4852]: I0129 12:16:05.476942 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="650ab9c7-7090-42ef-bbe7-4684c333bdfe" path="/var/lib/kubelet/pods/650ab9c7-7090-42ef-bbe7-4684c333bdfe/volumes" Jan 29 12:16:05 crc kubenswrapper[4852]: I0129 12:16:05.500444 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.500417225 podStartE2EDuration="2.500417225s" podCreationTimestamp="2026-01-29 12:16:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:05.484310411 +0000 UTC m=+5662.701641545" watchObservedRunningTime="2026-01-29 12:16:05.500417225 +0000 UTC m=+5662.717748359" Jan 29 12:16:07 crc kubenswrapper[4852]: I0129 12:16:07.748404 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.224108 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-47mtt"] Jan 29 12:16:08 crc 
kubenswrapper[4852]: I0129 12:16:08.226047 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.228713 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.229783 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.239376 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-47mtt"] Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.379132 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-scripts\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.379221 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-config-data\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.379260 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp2jc\" (UniqueName: \"kubernetes.io/projected/0bf6245c-9a72-4da9-a118-ba98c6495cdc-kube-api-access-vp2jc\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.379297 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.480870 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-scripts\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.481208 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-config-data\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.481247 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp2jc\" (UniqueName: \"kubernetes.io/projected/0bf6245c-9a72-4da9-a118-ba98c6495cdc-kube-api-access-vp2jc\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.481275 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.488464 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.489343 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-scripts\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.489680 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-config-data\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.513738 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp2jc\" (UniqueName: \"kubernetes.io/projected/0bf6245c-9a72-4da9-a118-ba98c6495cdc-kube-api-access-vp2jc\") pod \"nova-cell1-cell-mapping-47mtt\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.548554 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.872192 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 12:16:08 crc kubenswrapper[4852]: I0129 12:16:08.987147 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-47mtt"] Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.519140 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-47mtt" event={"ID":"0bf6245c-9a72-4da9-a118-ba98c6495cdc","Type":"ContainerStarted","Data":"c1df753753733380932cd04165424ba9439dbded91803d63dca62063917f48a8"} Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.519597 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-47mtt" event={"ID":"0bf6245c-9a72-4da9-a118-ba98c6495cdc","Type":"ContainerStarted","Data":"c285e33e4aa793ef016731078efc3406250f60e97b3dceebc657268c52be1622"} Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.541406 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-47mtt" podStartSLOduration=1.541388016 podStartE2EDuration="1.541388016s" podCreationTimestamp="2026-01-29 12:16:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:09.536570449 +0000 UTC m=+5666.753901583" watchObservedRunningTime="2026-01-29 12:16:09.541388016 +0000 UTC m=+5666.758719150" Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.799979 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.800035 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.826518 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 12:16:09 crc kubenswrapper[4852]: I0129 12:16:09.826573 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 12:16:10 crc kubenswrapper[4852]: I0129 12:16:10.843785 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.68:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:11 crc kubenswrapper[4852]: I0129 12:16:11.282411 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.68:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:11 crc kubenswrapper[4852]: I0129 12:16:11.330227 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.69:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:11 crc kubenswrapper[4852]: I0129 12:16:11.371796 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.69:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:13 crc kubenswrapper[4852]: I0129 12:16:13.872226 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 12:16:13 crc kubenswrapper[4852]: I0129 12:16:13.896318 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 12:16:14 crc kubenswrapper[4852]: I0129 12:16:14.595972 4852 generic.go:334] "Generic (PLEG): container finished" podID="0bf6245c-9a72-4da9-a118-ba98c6495cdc" containerID="c1df753753733380932cd04165424ba9439dbded91803d63dca62063917f48a8" exitCode=0 Jan 29 12:16:14 crc kubenswrapper[4852]: I0129 12:16:14.597310 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-47mtt" event={"ID":"0bf6245c-9a72-4da9-a118-ba98c6495cdc","Type":"ContainerDied","Data":"c1df753753733380932cd04165424ba9439dbded91803d63dca62063917f48a8"} Jan 29 12:16:14 crc kubenswrapper[4852]: I0129 12:16:14.638079 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.925711 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.957788 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-scripts\") pod \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.958001 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-config-data\") pod \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.958036 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-combined-ca-bundle\") pod \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.958107 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vp2jc\" (UniqueName: \"kubernetes.io/projected/0bf6245c-9a72-4da9-a118-ba98c6495cdc-kube-api-access-vp2jc\") pod \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\" (UID: \"0bf6245c-9a72-4da9-a118-ba98c6495cdc\") " Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.963497 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-scripts" (OuterVolumeSpecName: "scripts") pod "0bf6245c-9a72-4da9-a118-ba98c6495cdc" (UID: "0bf6245c-9a72-4da9-a118-ba98c6495cdc"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.971840 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bf6245c-9a72-4da9-a118-ba98c6495cdc-kube-api-access-vp2jc" (OuterVolumeSpecName: "kube-api-access-vp2jc") pod "0bf6245c-9a72-4da9-a118-ba98c6495cdc" (UID: "0bf6245c-9a72-4da9-a118-ba98c6495cdc"). InnerVolumeSpecName "kube-api-access-vp2jc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.983087 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-config-data" (OuterVolumeSpecName: "config-data") pod "0bf6245c-9a72-4da9-a118-ba98c6495cdc" (UID: "0bf6245c-9a72-4da9-a118-ba98c6495cdc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:15 crc kubenswrapper[4852]: I0129 12:16:15.987793 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bf6245c-9a72-4da9-a118-ba98c6495cdc" (UID: "0bf6245c-9a72-4da9-a118-ba98c6495cdc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.060488 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.060516 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.060528 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vp2jc\" (UniqueName: \"kubernetes.io/projected/0bf6245c-9a72-4da9-a118-ba98c6495cdc-kube-api-access-vp2jc\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.060537 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bf6245c-9a72-4da9-a118-ba98c6495cdc-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.618116 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-47mtt" event={"ID":"0bf6245c-9a72-4da9-a118-ba98c6495cdc","Type":"ContainerDied","Data":"c285e33e4aa793ef016731078efc3406250f60e97b3dceebc657268c52be1622"} Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.618159 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c285e33e4aa793ef016731078efc3406250f60e97b3dceebc657268c52be1622" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.618219 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-47mtt" Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.805086 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.805489 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-api" containerID="cri-o://2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1" gracePeriod=30 Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.805411 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-log" containerID="cri-o://b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63" gracePeriod=30 Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.816148 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.816374 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="082cf921-8f9d-42c7-9fc9-e72224ae6808" containerName="nova-scheduler-scheduler" containerID="cri-o://01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" gracePeriod=30 Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.827789 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.828016 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-log" containerID="cri-o://4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66" gracePeriod=30 Jan 29 12:16:16 crc kubenswrapper[4852]: I0129 12:16:16.828084 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-metadata" containerID="cri-o://3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56" gracePeriod=30 Jan 29 12:16:16 crc kubenswrapper[4852]: E0129 12:16:16.834826 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bf6245c_9a72_4da9_a118_ba98c6495cdc.slice/crio-c285e33e4aa793ef016731078efc3406250f60e97b3dceebc657268c52be1622\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bf6245c_9a72_4da9_a118_ba98c6495cdc.slice\": RecentStats: unable to find data in memory cache]" Jan 29 12:16:17 crc kubenswrapper[4852]: I0129 12:16:17.640826 4852 generic.go:334] "Generic (PLEG): container finished" podID="6be9f292-4ab6-497d-99ef-083e2735d362" containerID="4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66" exitCode=143 Jan 29 12:16:17 crc kubenswrapper[4852]: I0129 12:16:17.640924 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6be9f292-4ab6-497d-99ef-083e2735d362","Type":"ContainerDied","Data":"4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66"} Jan 29 12:16:17 crc kubenswrapper[4852]: I0129 12:16:17.644707 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerID="b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63" exitCode=143 Jan 29 12:16:17 crc kubenswrapper[4852]: I0129 12:16:17.644754 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f9fe96c8-183a-43fa-a219-d0bbe8ba5633","Type":"ContainerDied","Data":"b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63"} Jan 29 12:16:18 crc kubenswrapper[4852]: E0129 12:16:18.873949 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 12:16:18 crc kubenswrapper[4852]: E0129 12:16:18.876172 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 12:16:18 crc kubenswrapper[4852]: E0129 12:16:18.878018 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 12:16:18 crc kubenswrapper[4852]: E0129 12:16:18.878078 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="082cf921-8f9d-42c7-9fc9-e72224ae6808" containerName="nova-scheduler-scheduler" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.500326 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.502399 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.547900 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lgdf\" (UniqueName: \"kubernetes.io/projected/6be9f292-4ab6-497d-99ef-083e2735d362-kube-api-access-6lgdf\") pod \"6be9f292-4ab6-497d-99ef-083e2735d362\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.547989 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-config-data\") pod \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.548030 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6be9f292-4ab6-497d-99ef-083e2735d362-logs\") pod \"6be9f292-4ab6-497d-99ef-083e2735d362\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.548096 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-combined-ca-bundle\") pod \"6be9f292-4ab6-497d-99ef-083e2735d362\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.548147 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sh5qq\" (UniqueName: \"kubernetes.io/projected/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-kube-api-access-sh5qq\") pod \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.548205 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-combined-ca-bundle\") pod \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.548571 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-config-data\") pod \"6be9f292-4ab6-497d-99ef-083e2735d362\" (UID: \"6be9f292-4ab6-497d-99ef-083e2735d362\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.548675 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-logs\") pod \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\" (UID: \"f9fe96c8-183a-43fa-a219-d0bbe8ba5633\") " Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.550684 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-logs" (OuterVolumeSpecName: "logs") pod "f9fe96c8-183a-43fa-a219-d0bbe8ba5633" (UID: "f9fe96c8-183a-43fa-a219-d0bbe8ba5633"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.556706 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6be9f292-4ab6-497d-99ef-083e2735d362-logs" (OuterVolumeSpecName: "logs") pod "6be9f292-4ab6-497d-99ef-083e2735d362" (UID: "6be9f292-4ab6-497d-99ef-083e2735d362"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.559332 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6be9f292-4ab6-497d-99ef-083e2735d362-kube-api-access-6lgdf" (OuterVolumeSpecName: "kube-api-access-6lgdf") pod "6be9f292-4ab6-497d-99ef-083e2735d362" (UID: "6be9f292-4ab6-497d-99ef-083e2735d362"). InnerVolumeSpecName "kube-api-access-6lgdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.565826 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-kube-api-access-sh5qq" (OuterVolumeSpecName: "kube-api-access-sh5qq") pod "f9fe96c8-183a-43fa-a219-d0bbe8ba5633" (UID: "f9fe96c8-183a-43fa-a219-d0bbe8ba5633"). InnerVolumeSpecName "kube-api-access-sh5qq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.584758 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9fe96c8-183a-43fa-a219-d0bbe8ba5633" (UID: "f9fe96c8-183a-43fa-a219-d0bbe8ba5633"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.599913 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-config-data" (OuterVolumeSpecName: "config-data") pod "f9fe96c8-183a-43fa-a219-d0bbe8ba5633" (UID: "f9fe96c8-183a-43fa-a219-d0bbe8ba5633"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.600026 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6be9f292-4ab6-497d-99ef-083e2735d362" (UID: "6be9f292-4ab6-497d-99ef-083e2735d362"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.613980 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-config-data" (OuterVolumeSpecName: "config-data") pod "6be9f292-4ab6-497d-99ef-083e2735d362" (UID: "6be9f292-4ab6-497d-99ef-083e2735d362"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651415 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651452 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651463 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lgdf\" (UniqueName: \"kubernetes.io/projected/6be9f292-4ab6-497d-99ef-083e2735d362-kube-api-access-6lgdf\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651476 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651485 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6be9f292-4ab6-497d-99ef-083e2735d362-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651495 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6be9f292-4ab6-497d-99ef-083e2735d362-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651507 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sh5qq\" (UniqueName: \"kubernetes.io/projected/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-kube-api-access-sh5qq\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.651518 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fe96c8-183a-43fa-a219-d0bbe8ba5633-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.673030 4852 generic.go:334] "Generic (PLEG): container finished" podID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerID="2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1" exitCode=0 Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.673072 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.673096 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f9fe96c8-183a-43fa-a219-d0bbe8ba5633","Type":"ContainerDied","Data":"2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1"} Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.673144 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f9fe96c8-183a-43fa-a219-d0bbe8ba5633","Type":"ContainerDied","Data":"5909ffb775e4dcfa2f16a94d4876198a92b6f5912f6e36136158bd01d928fe0e"} Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.673178 4852 scope.go:117] "RemoveContainer" containerID="2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.676504 4852 generic.go:334] "Generic (PLEG): container finished" podID="6be9f292-4ab6-497d-99ef-083e2735d362" containerID="3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56" exitCode=0 Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.676531 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6be9f292-4ab6-497d-99ef-083e2735d362","Type":"ContainerDied","Data":"3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56"} Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.676549 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6be9f292-4ab6-497d-99ef-083e2735d362","Type":"ContainerDied","Data":"c4e0b31a44050a255886664bea0febc6fff18be673e7ea974ec510fbf4524cb1"} Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.676620 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.699314 4852 scope.go:117] "RemoveContainer" containerID="b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.730932 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.731896 4852 scope.go:117] "RemoveContainer" containerID="2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.738395 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1\": container with ID starting with 2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1 not found: ID does not exist" containerID="2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.738453 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1"} err="failed to get container status \"2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1\": rpc error: code = NotFound desc = could not find container \"2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1\": container with ID starting with 2309b0b213f5d17446b9cb1e71030cabad18495fcdc6f8c9f5bfb3b03e488ad1 not found: ID does not exist" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.738488 4852 scope.go:117] "RemoveContainer" containerID="b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.739471 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63\": container with ID starting with b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63 not found: ID does not exist" containerID="b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.739501 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63"} err="failed to get container status \"b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63\": rpc error: code = NotFound desc = could not find container \"b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63\": container with ID starting with b45482456e9955ec982128587d7d2cc10e32fb70839c2d80cbf7b622eb66dc63 not found: ID does not exist" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.739521 4852 scope.go:117] "RemoveContainer" containerID="3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.755249 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.771688 4852 scope.go:117] "RemoveContainer" containerID="4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.781763 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: 
I0129 12:16:20.792969 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.799823 4852 scope.go:117] "RemoveContainer" containerID="3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.802680 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.803184 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-metadata" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803210 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-metadata" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.803232 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-log" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803242 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-log" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.803256 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bf6245c-9a72-4da9-a118-ba98c6495cdc" containerName="nova-manage" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803264 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bf6245c-9a72-4da9-a118-ba98c6495cdc" containerName="nova-manage" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.803292 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-api" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803300 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-api" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.803318 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-log" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803325 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-log" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803560 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-api" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803593 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-log" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803672 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" containerName="nova-metadata-metadata" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803686 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" containerName="nova-api-log" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.803706 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bf6245c-9a72-4da9-a118-ba98c6495cdc" containerName="nova-manage" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.804917 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.805051 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56\": container with ID starting with 3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56 not found: ID does not exist" containerID="3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.805148 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56"} err="failed to get container status \"3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56\": rpc error: code = NotFound desc = could not find container \"3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56\": container with ID starting with 3c0059db8eb0e1f8ae5e524540354fd19a939360175a3a9a193b132716a6cf56 not found: ID does not exist" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.805185 4852 scope.go:117] "RemoveContainer" containerID="4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66" Jan 29 12:16:20 crc kubenswrapper[4852]: E0129 12:16:20.806039 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66\": container with ID starting with 4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66 not found: ID does not exist" containerID="4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.806090 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66"} err="failed to get container status \"4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66\": rpc error: code = NotFound desc = could not find container \"4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66\": container with ID starting with 4cadff76398fb15ba2d06b11b8c7d0456400bcd3dcd237fe99ac77f554beaf66 not found: ID does not exist" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.807289 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.811770 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.821899 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.827142 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.829208 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.830196 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.854645 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrkwt\" (UniqueName: \"kubernetes.io/projected/c52dd439-54a2-4094-bffa-42647f21c628-kube-api-access-wrkwt\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.854682 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-logs\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.854797 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-config-data\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.854966 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnchw\" (UniqueName: \"kubernetes.io/projected/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-kube-api-access-rnchw\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.855073 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-config-data\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.855156 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c52dd439-54a2-4094-bffa-42647f21c628-logs\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.855234 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.855285 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956050 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnchw\" 
(UniqueName: \"kubernetes.io/projected/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-kube-api-access-rnchw\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956394 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-config-data\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956424 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c52dd439-54a2-4094-bffa-42647f21c628-logs\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956448 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956466 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956522 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrkwt\" (UniqueName: \"kubernetes.io/projected/c52dd439-54a2-4094-bffa-42647f21c628-kube-api-access-wrkwt\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956542 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-logs\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.956570 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-config-data\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.957288 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c52dd439-54a2-4094-bffa-42647f21c628-logs\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.957307 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-logs\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.960726 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-config-data\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.960754 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-config-data\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.960729 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.963363 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.972212 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrkwt\" (UniqueName: \"kubernetes.io/projected/c52dd439-54a2-4094-bffa-42647f21c628-kube-api-access-wrkwt\") pod \"nova-metadata-0\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " pod="openstack/nova-metadata-0" Jan 29 12:16:20 crc kubenswrapper[4852]: I0129 12:16:20.975354 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnchw\" (UniqueName: \"kubernetes.io/projected/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-kube-api-access-rnchw\") pod \"nova-api-0\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " pod="openstack/nova-api-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.131108 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.143587 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.439900 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.465282 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hs7nv\" (UniqueName: \"kubernetes.io/projected/082cf921-8f9d-42c7-9fc9-e72224ae6808-kube-api-access-hs7nv\") pod \"082cf921-8f9d-42c7-9fc9-e72224ae6808\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.465359 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-config-data\") pod \"082cf921-8f9d-42c7-9fc9-e72224ae6808\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.465387 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-combined-ca-bundle\") pod \"082cf921-8f9d-42c7-9fc9-e72224ae6808\" (UID: \"082cf921-8f9d-42c7-9fc9-e72224ae6808\") " Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.472405 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/082cf921-8f9d-42c7-9fc9-e72224ae6808-kube-api-access-hs7nv" (OuterVolumeSpecName: "kube-api-access-hs7nv") pod "082cf921-8f9d-42c7-9fc9-e72224ae6808" (UID: "082cf921-8f9d-42c7-9fc9-e72224ae6808"). InnerVolumeSpecName "kube-api-access-hs7nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.485440 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6be9f292-4ab6-497d-99ef-083e2735d362" path="/var/lib/kubelet/pods/6be9f292-4ab6-497d-99ef-083e2735d362/volumes" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.486443 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9fe96c8-183a-43fa-a219-d0bbe8ba5633" path="/var/lib/kubelet/pods/f9fe96c8-183a-43fa-a219-d0bbe8ba5633/volumes" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.498974 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "082cf921-8f9d-42c7-9fc9-e72224ae6808" (UID: "082cf921-8f9d-42c7-9fc9-e72224ae6808"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.502777 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-config-data" (OuterVolumeSpecName: "config-data") pod "082cf921-8f9d-42c7-9fc9-e72224ae6808" (UID: "082cf921-8f9d-42c7-9fc9-e72224ae6808"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.568340 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hs7nv\" (UniqueName: \"kubernetes.io/projected/082cf921-8f9d-42c7-9fc9-e72224ae6808-kube-api-access-hs7nv\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.568381 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.568394 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082cf921-8f9d-42c7-9fc9-e72224ae6808-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.592803 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.672694 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:16:21 crc kubenswrapper[4852]: W0129 12:16:21.674518 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc52dd439_54a2_4094_bffa_42647f21c628.slice/crio-401c28c289c192654252383f81d6f2a8fdf7b8faa7af5b035f02446d78f64bf6 WatchSource:0}: Error finding container 401c28c289c192654252383f81d6f2a8fdf7b8faa7af5b035f02446d78f64bf6: Status 404 returned error can't find the container with id 401c28c289c192654252383f81d6f2a8fdf7b8faa7af5b035f02446d78f64bf6 Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.685490 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ccb3371d-a45f-4aeb-8c60-e69b05c7da69","Type":"ContainerStarted","Data":"6ef26556f2e77f0f1cd95b9583fd3fa4e7b8ad987b3793a311ed863542a4815e"} Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.687276 4852 generic.go:334] "Generic (PLEG): container finished" podID="082cf921-8f9d-42c7-9fc9-e72224ae6808" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" exitCode=0 Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.687331 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"082cf921-8f9d-42c7-9fc9-e72224ae6808","Type":"ContainerDied","Data":"01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86"} Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.687350 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"082cf921-8f9d-42c7-9fc9-e72224ae6808","Type":"ContainerDied","Data":"eb1d477c98dbc54ac747b29477379c07870adc9bce2002bb702ec47e02c31110"} Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.687368 4852 scope.go:117] "RemoveContainer" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.687481 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.690508 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c52dd439-54a2-4094-bffa-42647f21c628","Type":"ContainerStarted","Data":"401c28c289c192654252383f81d6f2a8fdf7b8faa7af5b035f02446d78f64bf6"} Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.719090 4852 scope.go:117] "RemoveContainer" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" Jan 29 12:16:21 crc kubenswrapper[4852]: E0129 12:16:21.719950 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86\": container with ID starting with 01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86 not found: ID does not exist" containerID="01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.719988 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86"} err="failed to get container status \"01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86\": rpc error: code = NotFound desc = could not find container \"01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86\": container with ID starting with 01ae7e1c231376a4c231bfc91fdd6ecb03250039d0c434d6fce681fcd1de2d86 not found: ID does not exist" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.737346 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.750627 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.765266 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:21 crc kubenswrapper[4852]: E0129 12:16:21.765721 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="082cf921-8f9d-42c7-9fc9-e72224ae6808" containerName="nova-scheduler-scheduler" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.765738 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="082cf921-8f9d-42c7-9fc9-e72224ae6808" containerName="nova-scheduler-scheduler" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.765897 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="082cf921-8f9d-42c7-9fc9-e72224ae6808" containerName="nova-scheduler-scheduler" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.766638 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.772109 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.772882 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.772980 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcxcl\" (UniqueName: \"kubernetes.io/projected/cb9fc717-2f1e-48f4-b648-084429d4c3ef-kube-api-access-fcxcl\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.773130 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-config-data\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.776900 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.875640 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-config-data\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.875755 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.875836 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcxcl\" (UniqueName: \"kubernetes.io/projected/cb9fc717-2f1e-48f4-b648-084429d4c3ef-kube-api-access-fcxcl\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.878839 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.884189 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-config-data\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:21 crc kubenswrapper[4852]: I0129 12:16:21.891219 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcxcl\" (UniqueName: 
\"kubernetes.io/projected/cb9fc717-2f1e-48f4-b648-084429d4c3ef-kube-api-access-fcxcl\") pod \"nova-scheduler-0\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " pod="openstack/nova-scheduler-0" Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.096761 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.586233 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.708759 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c52dd439-54a2-4094-bffa-42647f21c628","Type":"ContainerStarted","Data":"f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f"} Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.708799 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c52dd439-54a2-4094-bffa-42647f21c628","Type":"ContainerStarted","Data":"75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9"} Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.711659 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ccb3371d-a45f-4aeb-8c60-e69b05c7da69","Type":"ContainerStarted","Data":"81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0"} Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.711718 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ccb3371d-a45f-4aeb-8c60-e69b05c7da69","Type":"ContainerStarted","Data":"52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755"} Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.713107 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb9fc717-2f1e-48f4-b648-084429d4c3ef","Type":"ContainerStarted","Data":"9dd3219acd74129cca02bb3dfe3722e707435afa6a848789678ecec0619cf161"} Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.736027 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.735998646 podStartE2EDuration="2.735998646s" podCreationTimestamp="2026-01-29 12:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:22.726214037 +0000 UTC m=+5679.943545181" watchObservedRunningTime="2026-01-29 12:16:22.735998646 +0000 UTC m=+5679.953329790" Jan 29 12:16:22 crc kubenswrapper[4852]: I0129 12:16:22.751100 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.751081604 podStartE2EDuration="2.751081604s" podCreationTimestamp="2026-01-29 12:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:22.744121584 +0000 UTC m=+5679.961452718" watchObservedRunningTime="2026-01-29 12:16:22.751081604 +0000 UTC m=+5679.968412728" Jan 29 12:16:23 crc kubenswrapper[4852]: I0129 12:16:23.496042 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="082cf921-8f9d-42c7-9fc9-e72224ae6808" path="/var/lib/kubelet/pods/082cf921-8f9d-42c7-9fc9-e72224ae6808/volumes" Jan 29 12:16:23 crc kubenswrapper[4852]: I0129 12:16:23.722338 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"cb9fc717-2f1e-48f4-b648-084429d4c3ef","Type":"ContainerStarted","Data":"ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc"} Jan 29 12:16:23 crc kubenswrapper[4852]: I0129 12:16:23.761929 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.761911364 podStartE2EDuration="2.761911364s" podCreationTimestamp="2026-01-29 12:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:16:23.75153966 +0000 UTC m=+5680.968870804" watchObservedRunningTime="2026-01-29 12:16:23.761911364 +0000 UTC m=+5680.979242498" Jan 29 12:16:26 crc kubenswrapper[4852]: I0129 12:16:26.144801 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:16:26 crc kubenswrapper[4852]: I0129 12:16:26.145149 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:16:27 crc kubenswrapper[4852]: I0129 12:16:27.097351 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.017301 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.017906 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.017956 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.018683 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.018737 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" gracePeriod=600 Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.808758 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" exitCode=0 Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.808959 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a"} Jan 29 12:16:30 crc kubenswrapper[4852]: I0129 12:16:30.809187 4852 scope.go:117] "RemoveContainer" containerID="39ac92376ac0767fe7ffa13bcbc31ca20190bc032b2006a4dbc47c525a07d124" Jan 29 12:16:30 crc kubenswrapper[4852]: E0129 12:16:30.916134 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:16:31 crc kubenswrapper[4852]: I0129 12:16:31.131882 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 12:16:31 crc kubenswrapper[4852]: I0129 12:16:31.132030 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 12:16:31 crc kubenswrapper[4852]: I0129 12:16:31.144862 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 12:16:31 crc kubenswrapper[4852]: I0129 12:16:31.144924 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 12:16:31 crc kubenswrapper[4852]: I0129 12:16:31.827345 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:16:31 crc kubenswrapper[4852]: E0129 12:16:31.827607 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.097523 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.131323 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.298345 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.72:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.298364 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.72:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.298421 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.73:8775/\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.298353 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.73:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:16:32 crc kubenswrapper[4852]: I0129 12:16:32.891350 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.135370 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.136762 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.137102 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.139987 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.146561 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.146681 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.148495 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.150681 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.955130 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 12:16:41 crc kubenswrapper[4852]: I0129 12:16:41.964047 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.150203 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c6cb56f7c-9tgmj"] Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.153455 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.169345 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c6cb56f7c-9tgmj"] Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.282370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.282788 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-dns-svc\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.283001 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7khq7\" (UniqueName: \"kubernetes.io/projected/b4d5df6d-07ea-4247-8be4-7771c7c4124d-kube-api-access-7khq7\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.283195 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.283340 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-config\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.384736 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7khq7\" (UniqueName: \"kubernetes.io/projected/b4d5df6d-07ea-4247-8be4-7771c7c4124d-kube-api-access-7khq7\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.384814 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.384838 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-config\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.384906 4852 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.384935 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-dns-svc\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.385713 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-dns-svc\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.386213 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-config\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.386617 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.386953 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.406650 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7khq7\" (UniqueName: \"kubernetes.io/projected/b4d5df6d-07ea-4247-8be4-7771c7c4124d-kube-api-access-7khq7\") pod \"dnsmasq-dns-6c6cb56f7c-9tgmj\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.472179 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:42 crc kubenswrapper[4852]: I0129 12:16:42.995283 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c6cb56f7c-9tgmj"] Jan 29 12:16:43 crc kubenswrapper[4852]: W0129 12:16:43.005178 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4d5df6d_07ea_4247_8be4_7771c7c4124d.slice/crio-ef2ab725b21058fcc0853dd2695209379a81611c02e0701d83395c28f66daf5e WatchSource:0}: Error finding container ef2ab725b21058fcc0853dd2695209379a81611c02e0701d83395c28f66daf5e: Status 404 returned error can't find the container with id ef2ab725b21058fcc0853dd2695209379a81611c02e0701d83395c28f66daf5e Jan 29 12:16:43 crc kubenswrapper[4852]: I0129 12:16:43.971534 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" event={"ID":"b4d5df6d-07ea-4247-8be4-7771c7c4124d","Type":"ContainerStarted","Data":"ef2ab725b21058fcc0853dd2695209379a81611c02e0701d83395c28f66daf5e"} Jan 29 12:16:44 crc kubenswrapper[4852]: I0129 12:16:44.981843 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" event={"ID":"b4d5df6d-07ea-4247-8be4-7771c7c4124d","Type":"ContainerStarted","Data":"fe1c00a30168c872a6eafa1796b1bf1ecdf65559c50bb7aad77bcdb7f646abdb"} Jan 29 12:16:46 crc kubenswrapper[4852]: I0129 12:16:46.464264 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:16:46 crc kubenswrapper[4852]: E0129 12:16:46.464761 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:16:54 crc kubenswrapper[4852]: I0129 12:16:54.099264 4852 generic.go:334] "Generic (PLEG): container finished" podID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerID="fe1c00a30168c872a6eafa1796b1bf1ecdf65559c50bb7aad77bcdb7f646abdb" exitCode=0 Jan 29 12:16:54 crc kubenswrapper[4852]: I0129 12:16:54.099390 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" event={"ID":"b4d5df6d-07ea-4247-8be4-7771c7c4124d","Type":"ContainerDied","Data":"fe1c00a30168c872a6eafa1796b1bf1ecdf65559c50bb7aad77bcdb7f646abdb"} Jan 29 12:16:55 crc kubenswrapper[4852]: I0129 12:16:55.116971 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" event={"ID":"b4d5df6d-07ea-4247-8be4-7771c7c4124d","Type":"ContainerStarted","Data":"bbf4f2c362f4dd8d90bbd1d6fa0d36ef52086d6c4274363200845da86fab43ca"} Jan 29 12:16:55 crc kubenswrapper[4852]: I0129 12:16:55.117497 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:16:55 crc kubenswrapper[4852]: I0129 12:16:55.141840 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" podStartSLOduration=13.141817862 podStartE2EDuration="13.141817862s" podCreationTimestamp="2026-01-29 12:16:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-29 12:16:55.139024094 +0000 UTC m=+5712.356355248" watchObservedRunningTime="2026-01-29 12:16:55.141817862 +0000 UTC m=+5712.359148996" Jan 29 12:17:00 crc kubenswrapper[4852]: I0129 12:17:00.464471 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:17:00 crc kubenswrapper[4852]: E0129 12:17:00.466184 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:17:02 crc kubenswrapper[4852]: I0129 12:17:02.474251 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:17:02 crc kubenswrapper[4852]: I0129 12:17:02.557832 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-775f85d44f-s8cqk"] Jan 29 12:17:02 crc kubenswrapper[4852]: I0129 12:17:02.558212 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerName="dnsmasq-dns" containerID="cri-o://9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331" gracePeriod=10 Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.073006 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.145442 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-nb\") pod \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.145547 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-config\") pod \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.145712 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-sb\") pod \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.145831 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtv76\" (UniqueName: \"kubernetes.io/projected/81e8dc7c-ee39-48d3-bb61-56822eddf64e-kube-api-access-qtv76\") pod \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.145893 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-dns-svc\") pod \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\" (UID: \"81e8dc7c-ee39-48d3-bb61-56822eddf64e\") " Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.151995 4852 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81e8dc7c-ee39-48d3-bb61-56822eddf64e-kube-api-access-qtv76" (OuterVolumeSpecName: "kube-api-access-qtv76") pod "81e8dc7c-ee39-48d3-bb61-56822eddf64e" (UID: "81e8dc7c-ee39-48d3-bb61-56822eddf64e"). InnerVolumeSpecName "kube-api-access-qtv76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.199495 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "81e8dc7c-ee39-48d3-bb61-56822eddf64e" (UID: "81e8dc7c-ee39-48d3-bb61-56822eddf64e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.206505 4852 generic.go:334] "Generic (PLEG): container finished" podID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerID="9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331" exitCode=0 Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.206560 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" event={"ID":"81e8dc7c-ee39-48d3-bb61-56822eddf64e","Type":"ContainerDied","Data":"9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331"} Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.206610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" event={"ID":"81e8dc7c-ee39-48d3-bb61-56822eddf64e","Type":"ContainerDied","Data":"41abbbb4918c40fa324a39d66d98f276e87e6381f0dc6d76ba73557584eb02df"} Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.206630 4852 scope.go:117] "RemoveContainer" containerID="9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.206933 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-775f85d44f-s8cqk" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.230713 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-config" (OuterVolumeSpecName: "config") pod "81e8dc7c-ee39-48d3-bb61-56822eddf64e" (UID: "81e8dc7c-ee39-48d3-bb61-56822eddf64e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.249084 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtv76\" (UniqueName: \"kubernetes.io/projected/81e8dc7c-ee39-48d3-bb61-56822eddf64e-kube-api-access-qtv76\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.249144 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.249158 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.251288 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "81e8dc7c-ee39-48d3-bb61-56822eddf64e" (UID: "81e8dc7c-ee39-48d3-bb61-56822eddf64e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.256977 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "81e8dc7c-ee39-48d3-bb61-56822eddf64e" (UID: "81e8dc7c-ee39-48d3-bb61-56822eddf64e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.273674 4852 scope.go:117] "RemoveContainer" containerID="cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.297863 4852 scope.go:117] "RemoveContainer" containerID="9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331" Jan 29 12:17:03 crc kubenswrapper[4852]: E0129 12:17:03.299547 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331\": container with ID starting with 9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331 not found: ID does not exist" containerID="9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.299615 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331"} err="failed to get container status \"9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331\": rpc error: code = NotFound desc = could not find container \"9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331\": container with ID starting with 9f7cca4abfab184c7a818761768788e036a283727ade8c407cbae3f9727c2331 not found: ID does not exist" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.299659 4852 scope.go:117] "RemoveContainer" containerID="cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1" Jan 29 12:17:03 crc kubenswrapper[4852]: E0129 12:17:03.300036 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1\": container with ID starting with cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1 not found: ID does not exist" containerID="cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.300085 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1"} err="failed to get container status \"cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1\": rpc error: code = NotFound desc = could not find container \"cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1\": container with ID starting with cf3ebcf56521f3a27db561c5e5134c0d892b460a20f34d1c7eaaf39cd119bde1 not found: ID does not exist" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.350977 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.351031 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81e8dc7c-ee39-48d3-bb61-56822eddf64e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.530687 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-775f85d44f-s8cqk"] Jan 29 12:17:03 crc kubenswrapper[4852]: I0129 12:17:03.542253 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-775f85d44f-s8cqk"] Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.480543 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" path="/var/lib/kubelet/pods/81e8dc7c-ee39-48d3-bb61-56822eddf64e/volumes" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.718556 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-hw4nm"] Jan 29 12:17:05 crc kubenswrapper[4852]: E0129 12:17:05.719023 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerName="dnsmasq-dns" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.719045 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerName="dnsmasq-dns" Jan 29 12:17:05 crc kubenswrapper[4852]: E0129 12:17:05.719083 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerName="init" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.719090 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerName="init" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.719325 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="81e8dc7c-ee39-48d3-bb61-56822eddf64e" containerName="dnsmasq-dns" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.719929 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.731872 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-hw4nm"] Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.797954 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b564m\" (UniqueName: \"kubernetes.io/projected/225c5c60-ced3-48b4-88dc-23d98191127a-kube-api-access-b564m\") pod \"cinder-db-create-hw4nm\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.798045 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/225c5c60-ced3-48b4-88dc-23d98191127a-operator-scripts\") pod \"cinder-db-create-hw4nm\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.819417 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-4c4b-account-create-update-g5kfb"] Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.821058 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.823677 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.828166 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4c4b-account-create-update-g5kfb"] Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.899258 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/225c5c60-ced3-48b4-88dc-23d98191127a-operator-scripts\") pod \"cinder-db-create-hw4nm\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.899360 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-operator-scripts\") pod \"cinder-4c4b-account-create-update-g5kfb\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.899479 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7dm6\" (UniqueName: \"kubernetes.io/projected/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-kube-api-access-h7dm6\") pod \"cinder-4c4b-account-create-update-g5kfb\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.899509 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b564m\" (UniqueName: \"kubernetes.io/projected/225c5c60-ced3-48b4-88dc-23d98191127a-kube-api-access-b564m\") pod \"cinder-db-create-hw4nm\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.900344 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/225c5c60-ced3-48b4-88dc-23d98191127a-operator-scripts\") pod \"cinder-db-create-hw4nm\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:05 crc kubenswrapper[4852]: I0129 12:17:05.918938 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b564m\" (UniqueName: \"kubernetes.io/projected/225c5c60-ced3-48b4-88dc-23d98191127a-kube-api-access-b564m\") pod \"cinder-db-create-hw4nm\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.001026 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-operator-scripts\") pod \"cinder-4c4b-account-create-update-g5kfb\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.001111 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7dm6\" (UniqueName: \"kubernetes.io/projected/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-kube-api-access-h7dm6\") pod \"cinder-4c4b-account-create-update-g5kfb\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.001843 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-operator-scripts\") pod \"cinder-4c4b-account-create-update-g5kfb\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.019940 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7dm6\" (UniqueName: \"kubernetes.io/projected/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-kube-api-access-h7dm6\") pod \"cinder-4c4b-account-create-update-g5kfb\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.040015 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.138534 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.519327 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-hw4nm"] Jan 29 12:17:06 crc kubenswrapper[4852]: I0129 12:17:06.664093 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4c4b-account-create-update-g5kfb"] Jan 29 12:17:07 crc kubenswrapper[4852]: I0129 12:17:07.257046 4852 generic.go:334] "Generic (PLEG): container finished" podID="225c5c60-ced3-48b4-88dc-23d98191127a" containerID="0a2ef7543c6eb2a01a8730d03f63274bdeefb77c9e467721b799ef795e712c67" exitCode=0 Jan 29 12:17:07 crc kubenswrapper[4852]: I0129 12:17:07.257142 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hw4nm" event={"ID":"225c5c60-ced3-48b4-88dc-23d98191127a","Type":"ContainerDied","Data":"0a2ef7543c6eb2a01a8730d03f63274bdeefb77c9e467721b799ef795e712c67"} Jan 29 12:17:07 crc kubenswrapper[4852]: I0129 12:17:07.257210 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hw4nm" event={"ID":"225c5c60-ced3-48b4-88dc-23d98191127a","Type":"ContainerStarted","Data":"cc6ee27ead87e398c5612b115a0dd2f08ffe9e4c7610ccdb9fe9d925c28f28ff"} Jan 29 12:17:07 crc kubenswrapper[4852]: I0129 12:17:07.260034 4852 generic.go:334] "Generic (PLEG): container finished" podID="d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" containerID="fc4a151e6dd42b4ee8f1e5187b02d2f1b1c992aefafb1059b1bc2a9a52c33c70" exitCode=0 Jan 29 12:17:07 crc kubenswrapper[4852]: I0129 12:17:07.260110 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4c4b-account-create-update-g5kfb" event={"ID":"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e","Type":"ContainerDied","Data":"fc4a151e6dd42b4ee8f1e5187b02d2f1b1c992aefafb1059b1bc2a9a52c33c70"} Jan 29 12:17:07 crc kubenswrapper[4852]: I0129 12:17:07.260168 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4c4b-account-create-update-g5kfb" event={"ID":"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e","Type":"ContainerStarted","Data":"00e739a15a9829524257346b34bc29e35889934dd15d0cdd79f18df1fb178b59"} Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.729017 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.735088 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.886679 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7dm6\" (UniqueName: \"kubernetes.io/projected/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-kube-api-access-h7dm6\") pod \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.886950 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/225c5c60-ced3-48b4-88dc-23d98191127a-operator-scripts\") pod \"225c5c60-ced3-48b4-88dc-23d98191127a\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.887023 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b564m\" (UniqueName: \"kubernetes.io/projected/225c5c60-ced3-48b4-88dc-23d98191127a-kube-api-access-b564m\") pod \"225c5c60-ced3-48b4-88dc-23d98191127a\" (UID: \"225c5c60-ced3-48b4-88dc-23d98191127a\") " Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.887069 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-operator-scripts\") pod \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\" (UID: \"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e\") " Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.888044 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/225c5c60-ced3-48b4-88dc-23d98191127a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "225c5c60-ced3-48b4-88dc-23d98191127a" (UID: "225c5c60-ced3-48b4-88dc-23d98191127a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.888140 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" (UID: "d35e0fe5-a6f0-46c3-8259-e1d777d01d1e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.894151 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/225c5c60-ced3-48b4-88dc-23d98191127a-kube-api-access-b564m" (OuterVolumeSpecName: "kube-api-access-b564m") pod "225c5c60-ced3-48b4-88dc-23d98191127a" (UID: "225c5c60-ced3-48b4-88dc-23d98191127a"). InnerVolumeSpecName "kube-api-access-b564m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.894271 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-kube-api-access-h7dm6" (OuterVolumeSpecName: "kube-api-access-h7dm6") pod "d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" (UID: "d35e0fe5-a6f0-46c3-8259-e1d777d01d1e"). InnerVolumeSpecName "kube-api-access-h7dm6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.989554 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b564m\" (UniqueName: \"kubernetes.io/projected/225c5c60-ced3-48b4-88dc-23d98191127a-kube-api-access-b564m\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.990066 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.990085 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7dm6\" (UniqueName: \"kubernetes.io/projected/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e-kube-api-access-h7dm6\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:08 crc kubenswrapper[4852]: I0129 12:17:08.990102 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/225c5c60-ced3-48b4-88dc-23d98191127a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:09 crc kubenswrapper[4852]: I0129 12:17:09.289213 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-hw4nm" event={"ID":"225c5c60-ced3-48b4-88dc-23d98191127a","Type":"ContainerDied","Data":"cc6ee27ead87e398c5612b115a0dd2f08ffe9e4c7610ccdb9fe9d925c28f28ff"} Jan 29 12:17:09 crc kubenswrapper[4852]: I0129 12:17:09.289537 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc6ee27ead87e398c5612b115a0dd2f08ffe9e4c7610ccdb9fe9d925c28f28ff" Jan 29 12:17:09 crc kubenswrapper[4852]: I0129 12:17:09.289269 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-hw4nm" Jan 29 12:17:09 crc kubenswrapper[4852]: I0129 12:17:09.292694 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4c4b-account-create-update-g5kfb" event={"ID":"d35e0fe5-a6f0-46c3-8259-e1d777d01d1e","Type":"ContainerDied","Data":"00e739a15a9829524257346b34bc29e35889934dd15d0cdd79f18df1fb178b59"} Jan 29 12:17:09 crc kubenswrapper[4852]: I0129 12:17:09.292742 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00e739a15a9829524257346b34bc29e35889934dd15d0cdd79f18df1fb178b59" Jan 29 12:17:09 crc kubenswrapper[4852]: I0129 12:17:09.292958 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-4c4b-account-create-update-g5kfb" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.027506 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-9glwg"] Jan 29 12:17:11 crc kubenswrapper[4852]: E0129 12:17:11.028266 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" containerName="mariadb-account-create-update" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.028281 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" containerName="mariadb-account-create-update" Jan 29 12:17:11 crc kubenswrapper[4852]: E0129 12:17:11.028308 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="225c5c60-ced3-48b4-88dc-23d98191127a" containerName="mariadb-database-create" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.028315 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="225c5c60-ced3-48b4-88dc-23d98191127a" containerName="mariadb-database-create" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.028515 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" containerName="mariadb-account-create-update" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.028539 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="225c5c60-ced3-48b4-88dc-23d98191127a" containerName="mariadb-database-create" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.029298 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.031403 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.031436 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nb6wf" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.033741 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.044597 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-9glwg"] Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.133688 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-scripts\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.133768 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-db-sync-config-data\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.133818 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c8af433-a656-4d8d-8c42-65ee5672e549-etc-machine-id\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 
12:17:11.133888 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-config-data\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.133915 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwdcb\" (UniqueName: \"kubernetes.io/projected/9c8af433-a656-4d8d-8c42-65ee5672e549-kube-api-access-dwdcb\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.134014 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-combined-ca-bundle\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.235937 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-config-data\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.235985 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwdcb\" (UniqueName: \"kubernetes.io/projected/9c8af433-a656-4d8d-8c42-65ee5672e549-kube-api-access-dwdcb\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.236053 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-combined-ca-bundle\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.236126 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-scripts\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.236161 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-db-sync-config-data\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.236176 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c8af433-a656-4d8d-8c42-65ee5672e549-etc-machine-id\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.236251 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/9c8af433-a656-4d8d-8c42-65ee5672e549-etc-machine-id\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.242689 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-db-sync-config-data\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.244409 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-combined-ca-bundle\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.247528 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-config-data\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.256145 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-scripts\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.266097 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwdcb\" (UniqueName: \"kubernetes.io/projected/9c8af433-a656-4d8d-8c42-65ee5672e549-kube-api-access-dwdcb\") pod \"cinder-db-sync-9glwg\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.362363 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:11 crc kubenswrapper[4852]: W0129 12:17:11.876391 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c8af433_a656_4d8d_8c42_65ee5672e549.slice/crio-170355d3862637b6bec9454b90e8b09c85d20b44a4d118e960cfd4117222bb6e WatchSource:0}: Error finding container 170355d3862637b6bec9454b90e8b09c85d20b44a4d118e960cfd4117222bb6e: Status 404 returned error can't find the container with id 170355d3862637b6bec9454b90e8b09c85d20b44a4d118e960cfd4117222bb6e Jan 29 12:17:11 crc kubenswrapper[4852]: I0129 12:17:11.878258 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-9glwg"] Jan 29 12:17:12 crc kubenswrapper[4852]: I0129 12:17:12.323610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9glwg" event={"ID":"9c8af433-a656-4d8d-8c42-65ee5672e549","Type":"ContainerStarted","Data":"170355d3862637b6bec9454b90e8b09c85d20b44a4d118e960cfd4117222bb6e"} Jan 29 12:17:12 crc kubenswrapper[4852]: I0129 12:17:12.464016 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:17:12 crc kubenswrapper[4852]: E0129 12:17:12.464301 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:17:13 crc kubenswrapper[4852]: I0129 12:17:13.350563 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9glwg" event={"ID":"9c8af433-a656-4d8d-8c42-65ee5672e549","Type":"ContainerStarted","Data":"a7273c844f72ac3a949308876ed1d0d5d08f22a75058aaf529b0edab72bd4b02"} Jan 29 12:17:13 crc kubenswrapper[4852]: I0129 12:17:13.375537 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-9glwg" podStartSLOduration=2.375519383 podStartE2EDuration="2.375519383s" podCreationTimestamp="2026-01-29 12:17:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:13.369522947 +0000 UTC m=+5730.586854101" watchObservedRunningTime="2026-01-29 12:17:13.375519383 +0000 UTC m=+5730.592850517" Jan 29 12:17:17 crc kubenswrapper[4852]: I0129 12:17:17.384204 4852 generic.go:334] "Generic (PLEG): container finished" podID="9c8af433-a656-4d8d-8c42-65ee5672e549" containerID="a7273c844f72ac3a949308876ed1d0d5d08f22a75058aaf529b0edab72bd4b02" exitCode=0 Jan 29 12:17:17 crc kubenswrapper[4852]: I0129 12:17:17.384667 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9glwg" event={"ID":"9c8af433-a656-4d8d-8c42-65ee5672e549","Type":"ContainerDied","Data":"a7273c844f72ac3a949308876ed1d0d5d08f22a75058aaf529b0edab72bd4b02"} Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.875288 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.987717 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwdcb\" (UniqueName: \"kubernetes.io/projected/9c8af433-a656-4d8d-8c42-65ee5672e549-kube-api-access-dwdcb\") pod \"9c8af433-a656-4d8d-8c42-65ee5672e549\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.987861 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-config-data\") pod \"9c8af433-a656-4d8d-8c42-65ee5672e549\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.987912 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-db-sync-config-data\") pod \"9c8af433-a656-4d8d-8c42-65ee5672e549\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.988032 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c8af433-a656-4d8d-8c42-65ee5672e549-etc-machine-id\") pod \"9c8af433-a656-4d8d-8c42-65ee5672e549\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.988066 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-scripts\") pod \"9c8af433-a656-4d8d-8c42-65ee5672e549\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.988164 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-combined-ca-bundle\") pod \"9c8af433-a656-4d8d-8c42-65ee5672e549\" (UID: \"9c8af433-a656-4d8d-8c42-65ee5672e549\") " Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.988629 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9c8af433-a656-4d8d-8c42-65ee5672e549-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9c8af433-a656-4d8d-8c42-65ee5672e549" (UID: "9c8af433-a656-4d8d-8c42-65ee5672e549"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.988977 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c8af433-a656-4d8d-8c42-65ee5672e549-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.993215 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c8af433-a656-4d8d-8c42-65ee5672e549-kube-api-access-dwdcb" (OuterVolumeSpecName: "kube-api-access-dwdcb") pod "9c8af433-a656-4d8d-8c42-65ee5672e549" (UID: "9c8af433-a656-4d8d-8c42-65ee5672e549"). InnerVolumeSpecName "kube-api-access-dwdcb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.993438 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-scripts" (OuterVolumeSpecName: "scripts") pod "9c8af433-a656-4d8d-8c42-65ee5672e549" (UID: "9c8af433-a656-4d8d-8c42-65ee5672e549"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:18 crc kubenswrapper[4852]: I0129 12:17:18.994248 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "9c8af433-a656-4d8d-8c42-65ee5672e549" (UID: "9c8af433-a656-4d8d-8c42-65ee5672e549"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.015245 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c8af433-a656-4d8d-8c42-65ee5672e549" (UID: "9c8af433-a656-4d8d-8c42-65ee5672e549"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.036541 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-config-data" (OuterVolumeSpecName: "config-data") pod "9c8af433-a656-4d8d-8c42-65ee5672e549" (UID: "9c8af433-a656-4d8d-8c42-65ee5672e549"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.091533 4852 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.091568 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.091593 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.091631 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwdcb\" (UniqueName: \"kubernetes.io/projected/9c8af433-a656-4d8d-8c42-65ee5672e549-kube-api-access-dwdcb\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.091642 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c8af433-a656-4d8d-8c42-65ee5672e549-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.411513 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-9glwg" event={"ID":"9c8af433-a656-4d8d-8c42-65ee5672e549","Type":"ContainerDied","Data":"170355d3862637b6bec9454b90e8b09c85d20b44a4d118e960cfd4117222bb6e"} Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.411550 4852 pod_container_deletor.go:80] "Container not 
found in pod's containers" containerID="170355d3862637b6bec9454b90e8b09c85d20b44a4d118e960cfd4117222bb6e" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.411629 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-9glwg" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.714790 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59dcdddb99-s7jdf"] Jan 29 12:17:19 crc kubenswrapper[4852]: E0129 12:17:19.715409 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c8af433-a656-4d8d-8c42-65ee5672e549" containerName="cinder-db-sync" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.715427 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c8af433-a656-4d8d-8c42-65ee5672e549" containerName="cinder-db-sync" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.715659 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c8af433-a656-4d8d-8c42-65ee5672e549" containerName="cinder-db-sync" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.719275 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.756403 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59dcdddb99-s7jdf"] Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.805805 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g42h2\" (UniqueName: \"kubernetes.io/projected/4100243b-aafb-4ca0-8822-ecd70ec56f55-kube-api-access-g42h2\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.805871 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-sb\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.805996 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-dns-svc\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.806036 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-nb\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.806071 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-config\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.907343 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-g42h2\" (UniqueName: \"kubernetes.io/projected/4100243b-aafb-4ca0-8822-ecd70ec56f55-kube-api-access-g42h2\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.907415 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-sb\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.907471 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-dns-svc\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.907495 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-nb\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.907517 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-config\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.908445 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-config\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.909361 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-sb\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.910106 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-nb\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.910491 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-dns-svc\") pod \"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.925468 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g42h2\" (UniqueName: \"kubernetes.io/projected/4100243b-aafb-4ca0-8822-ecd70ec56f55-kube-api-access-g42h2\") pod 
\"dnsmasq-dns-59dcdddb99-s7jdf\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.968954 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.970454 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.978438 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.983179 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nb6wf" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.983384 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.983620 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 12:17:19 crc kubenswrapper[4852]: I0129 12:17:19.984083 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.046967 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111453 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/806efd2e-156e-407f-a9f9-26eee522e9ef-logs\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111503 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111639 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data-custom\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111682 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111714 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/806efd2e-156e-407f-a9f9-26eee522e9ef-etc-machine-id\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111738 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tr9t\" (UniqueName: 
\"kubernetes.io/projected/806efd2e-156e-407f-a9f9-26eee522e9ef-kube-api-access-7tr9t\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:20 crc kubenswrapper[4852]: I0129 12:17:20.111779 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-scripts\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214040 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-scripts\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214377 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/806efd2e-156e-407f-a9f9-26eee522e9ef-logs\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214419 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214520 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data-custom\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214571 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214635 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/806efd2e-156e-407f-a9f9-26eee522e9ef-etc-machine-id\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214665 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tr9t\" (UniqueName: \"kubernetes.io/projected/806efd2e-156e-407f-a9f9-26eee522e9ef-kube-api-access-7tr9t\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.214883 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/806efd2e-156e-407f-a9f9-26eee522e9ef-logs\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.215491 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/806efd2e-156e-407f-a9f9-26eee522e9ef-etc-machine-id\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.217569 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-scripts\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.222401 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.222943 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data-custom\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.235810 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.237773 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tr9t\" (UniqueName: \"kubernetes.io/projected/806efd2e-156e-407f-a9f9-26eee522e9ef-kube-api-access-7tr9t\") pod \"cinder-api-0\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:20.306755 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:21.770544 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59dcdddb99-s7jdf"] Jan 29 12:17:21 crc kubenswrapper[4852]: W0129 12:17:21.780704 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4100243b_aafb_4ca0_8822_ecd70ec56f55.slice/crio-9cbd3fb49f0a5bbad463ffc62398fa5f51bf00c4e620ab816297046a8817a78a WatchSource:0}: Error finding container 9cbd3fb49f0a5bbad463ffc62398fa5f51bf00c4e620ab816297046a8817a78a: Status 404 returned error can't find the container with id 9cbd3fb49f0a5bbad463ffc62398fa5f51bf00c4e620ab816297046a8817a78a Jan 29 12:17:21 crc kubenswrapper[4852]: I0129 12:17:21.819385 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:21 crc kubenswrapper[4852]: W0129 12:17:21.828241 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod806efd2e_156e_407f_a9f9_26eee522e9ef.slice/crio-ca757aa4e6179e4657268faf1a1cadae8cd38eb93b25d86f4820d6c5f989bcc4 WatchSource:0}: Error finding container ca757aa4e6179e4657268faf1a1cadae8cd38eb93b25d86f4820d6c5f989bcc4: Status 404 returned error can't find the container with id ca757aa4e6179e4657268faf1a1cadae8cd38eb93b25d86f4820d6c5f989bcc4 Jan 29 12:17:22 crc kubenswrapper[4852]: I0129 12:17:22.442080 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" event={"ID":"4100243b-aafb-4ca0-8822-ecd70ec56f55","Type":"ContainerDied","Data":"d3382521d5c606c5dbaa1469e8343a67f404c4992b50e3eb09c4804bcc863c60"} Jan 29 12:17:22 crc kubenswrapper[4852]: I0129 12:17:22.442170 4852 generic.go:334] "Generic (PLEG): container finished" podID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerID="d3382521d5c606c5dbaa1469e8343a67f404c4992b50e3eb09c4804bcc863c60" exitCode=0 Jan 29 12:17:22 crc kubenswrapper[4852]: I0129 12:17:22.443456 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" event={"ID":"4100243b-aafb-4ca0-8822-ecd70ec56f55","Type":"ContainerStarted","Data":"9cbd3fb49f0a5bbad463ffc62398fa5f51bf00c4e620ab816297046a8817a78a"} Jan 29 12:17:22 crc kubenswrapper[4852]: I0129 12:17:22.445382 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"806efd2e-156e-407f-a9f9-26eee522e9ef","Type":"ContainerStarted","Data":"1b330a5a09461ed1c44cd8950b2c57ef4ae2053e02d2d8aeb68f3deb078dca06"} Jan 29 12:17:22 crc kubenswrapper[4852]: I0129 12:17:22.445486 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"806efd2e-156e-407f-a9f9-26eee522e9ef","Type":"ContainerStarted","Data":"ca757aa4e6179e4657268faf1a1cadae8cd38eb93b25d86f4820d6c5f989bcc4"} Jan 29 12:17:23 crc kubenswrapper[4852]: I0129 12:17:23.455282 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"806efd2e-156e-407f-a9f9-26eee522e9ef","Type":"ContainerStarted","Data":"c1fd62853b57ad8f7bd89e942353cfd657bb5fdabcfaaf5daa691dc3494cd8aa"} Jan 29 12:17:23 crc kubenswrapper[4852]: I0129 12:17:23.455952 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 12:17:23 crc kubenswrapper[4852]: I0129 12:17:23.457774 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" 
event={"ID":"4100243b-aafb-4ca0-8822-ecd70ec56f55","Type":"ContainerStarted","Data":"441cc2fda8cb89c9963bf264ac097d76b32f3386322c4c9005e5e73a6ee67ba4"} Jan 29 12:17:23 crc kubenswrapper[4852]: I0129 12:17:23.458614 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:23 crc kubenswrapper[4852]: I0129 12:17:23.510724 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" podStartSLOduration=4.5106975049999996 podStartE2EDuration="4.510697505s" podCreationTimestamp="2026-01-29 12:17:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:23.506073413 +0000 UTC m=+5740.723404557" watchObservedRunningTime="2026-01-29 12:17:23.510697505 +0000 UTC m=+5740.728028669" Jan 29 12:17:23 crc kubenswrapper[4852]: I0129 12:17:23.518875 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.5188483040000005 podStartE2EDuration="4.518848304s" podCreationTimestamp="2026-01-29 12:17:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:23.48092766 +0000 UTC m=+5740.698258794" watchObservedRunningTime="2026-01-29 12:17:23.518848304 +0000 UTC m=+5740.736179468" Jan 29 12:17:25 crc kubenswrapper[4852]: I0129 12:17:25.464568 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:17:25 crc kubenswrapper[4852]: E0129 12:17:25.465210 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.049516 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.121349 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c6cb56f7c-9tgmj"] Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.121620 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerName="dnsmasq-dns" containerID="cri-o://bbf4f2c362f4dd8d90bbd1d6fa0d36ef52086d6c4274363200845da86fab43ca" gracePeriod=10 Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.536567 4852 generic.go:334] "Generic (PLEG): container finished" podID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerID="bbf4f2c362f4dd8d90bbd1d6fa0d36ef52086d6c4274363200845da86fab43ca" exitCode=0 Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.536859 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" event={"ID":"b4d5df6d-07ea-4247-8be4-7771c7c4124d","Type":"ContainerDied","Data":"bbf4f2c362f4dd8d90bbd1d6fa0d36ef52086d6c4274363200845da86fab43ca"} Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.653102 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.834057 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-config\") pod \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.834157 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-nb\") pod \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.834183 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-dns-svc\") pod \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.834246 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-sb\") pod \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.834278 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7khq7\" (UniqueName: \"kubernetes.io/projected/b4d5df6d-07ea-4247-8be4-7771c7c4124d-kube-api-access-7khq7\") pod \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\" (UID: \"b4d5df6d-07ea-4247-8be4-7771c7c4124d\") " Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.841529 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4d5df6d-07ea-4247-8be4-7771c7c4124d-kube-api-access-7khq7" (OuterVolumeSpecName: "kube-api-access-7khq7") pod "b4d5df6d-07ea-4247-8be4-7771c7c4124d" (UID: "b4d5df6d-07ea-4247-8be4-7771c7c4124d"). InnerVolumeSpecName "kube-api-access-7khq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.894519 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b4d5df6d-07ea-4247-8be4-7771c7c4124d" (UID: "b4d5df6d-07ea-4247-8be4-7771c7c4124d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.901782 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b4d5df6d-07ea-4247-8be4-7771c7c4124d" (UID: "b4d5df6d-07ea-4247-8be4-7771c7c4124d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.903932 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b4d5df6d-07ea-4247-8be4-7771c7c4124d" (UID: "b4d5df6d-07ea-4247-8be4-7771c7c4124d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.916978 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-config" (OuterVolumeSpecName: "config") pod "b4d5df6d-07ea-4247-8be4-7771c7c4124d" (UID: "b4d5df6d-07ea-4247-8be4-7771c7c4124d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.936346 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.936377 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.936463 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.936493 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4d5df6d-07ea-4247-8be4-7771c7c4124d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:30 crc kubenswrapper[4852]: I0129 12:17:30.936519 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7khq7\" (UniqueName: \"kubernetes.io/projected/b4d5df6d-07ea-4247-8be4-7771c7c4124d-kube-api-access-7khq7\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.549337 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" event={"ID":"b4d5df6d-07ea-4247-8be4-7771c7c4124d","Type":"ContainerDied","Data":"ef2ab725b21058fcc0853dd2695209379a81611c02e0701d83395c28f66daf5e"} Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.549475 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c6cb56f7c-9tgmj" Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.549554 4852 scope.go:117] "RemoveContainer" containerID="bbf4f2c362f4dd8d90bbd1d6fa0d36ef52086d6c4274363200845da86fab43ca" Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.593080 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c6cb56f7c-9tgmj"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.594629 4852 scope.go:117] "RemoveContainer" containerID="fe1c00a30168c872a6eafa1796b1bf1ecdf65559c50bb7aad77bcdb7f646abdb" Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.602288 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c6cb56f7c-9tgmj"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.670806 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.671091 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-log" containerID="cri-o://52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755" gracePeriod=30 Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.671519 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-api" containerID="cri-o://81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0" gracePeriod=30 Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.696106 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.696393 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-log" containerID="cri-o://75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9" gracePeriod=30 Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.696472 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-metadata" containerID="cri-o://f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f" gracePeriod=30 Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.709546 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.709772 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" containerName="nova-scheduler-scheduler" containerID="cri-o://ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" gracePeriod=30 Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.722124 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.722432 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerName="nova-cell1-conductor-conductor" containerID="cri-o://b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" gracePeriod=30 Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.747254 
4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:17:31 crc kubenswrapper[4852]: I0129 12:17:31.747473 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="add26b63-e3cc-42bc-9e83-5769bf7caa4c" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a2b60bffa2e680b703b84804f33a875aa7c20dc160f72f720cc6cc42498264df" gracePeriod=30 Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.100230 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.101957 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.103398 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.103443 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" containerName="nova-scheduler-scheduler" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.145641 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.567330 4852 generic.go:334] "Generic (PLEG): container finished" podID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerID="52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755" exitCode=143 Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.567490 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ccb3371d-a45f-4aeb-8c60-e69b05c7da69","Type":"ContainerDied","Data":"52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755"} Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.573683 4852 generic.go:334] "Generic (PLEG): container finished" podID="add26b63-e3cc-42bc-9e83-5769bf7caa4c" containerID="a2b60bffa2e680b703b84804f33a875aa7c20dc160f72f720cc6cc42498264df" exitCode=0 Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.573746 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"add26b63-e3cc-42bc-9e83-5769bf7caa4c","Type":"ContainerDied","Data":"a2b60bffa2e680b703b84804f33a875aa7c20dc160f72f720cc6cc42498264df"} Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.575846 4852 generic.go:334] "Generic (PLEG): container finished" podID="c52dd439-54a2-4094-bffa-42647f21c628" containerID="75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9" 
exitCode=143 Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.575868 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c52dd439-54a2-4094-bffa-42647f21c628","Type":"ContainerDied","Data":"75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9"} Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.708636 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.709171 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.710171 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.711814 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 12:17:32 crc kubenswrapper[4852]: E0129 12:17:32.711851 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerName="nova-cell1-conductor-conductor" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.770685 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-combined-ca-bundle\") pod \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.770738 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmfx8\" (UniqueName: \"kubernetes.io/projected/add26b63-e3cc-42bc-9e83-5769bf7caa4c-kube-api-access-jmfx8\") pod \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.770848 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-config-data\") pod \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\" (UID: \"add26b63-e3cc-42bc-9e83-5769bf7caa4c\") " Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.777880 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/add26b63-e3cc-42bc-9e83-5769bf7caa4c-kube-api-access-jmfx8" (OuterVolumeSpecName: "kube-api-access-jmfx8") pod "add26b63-e3cc-42bc-9e83-5769bf7caa4c" (UID: "add26b63-e3cc-42bc-9e83-5769bf7caa4c"). InnerVolumeSpecName "kube-api-access-jmfx8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.802839 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-config-data" (OuterVolumeSpecName: "config-data") pod "add26b63-e3cc-42bc-9e83-5769bf7caa4c" (UID: "add26b63-e3cc-42bc-9e83-5769bf7caa4c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.834311 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "add26b63-e3cc-42bc-9e83-5769bf7caa4c" (UID: "add26b63-e3cc-42bc-9e83-5769bf7caa4c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.876454 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.876507 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmfx8\" (UniqueName: \"kubernetes.io/projected/add26b63-e3cc-42bc-9e83-5769bf7caa4c-kube-api-access-jmfx8\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:32 crc kubenswrapper[4852]: I0129 12:17:32.877648 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/add26b63-e3cc-42bc-9e83-5769bf7caa4c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.475139 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" path="/var/lib/kubelet/pods/b4d5df6d-07ea-4247-8be4-7771c7c4124d/volumes" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.585665 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"add26b63-e3cc-42bc-9e83-5769bf7caa4c","Type":"ContainerDied","Data":"04ff5f9d04aa83b92f435f5775dcc804ebf69bccc48891041350a2a45af7d1ed"} Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.585724 4852 scope.go:117] "RemoveContainer" containerID="a2b60bffa2e680b703b84804f33a875aa7c20dc160f72f720cc6cc42498264df" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.585871 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.616568 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.635359 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.658997 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:17:33 crc kubenswrapper[4852]: E0129 12:17:33.659426 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerName="dnsmasq-dns" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.659449 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerName="dnsmasq-dns" Jan 29 12:17:33 crc kubenswrapper[4852]: E0129 12:17:33.659474 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerName="init" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.659482 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerName="init" Jan 29 12:17:33 crc kubenswrapper[4852]: E0129 12:17:33.659501 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="add26b63-e3cc-42bc-9e83-5769bf7caa4c" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.659510 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="add26b63-e3cc-42bc-9e83-5769bf7caa4c" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.659736 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="add26b63-e3cc-42bc-9e83-5769bf7caa4c" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.659775 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4d5df6d-07ea-4247-8be4-7771c7c4124d" containerName="dnsmasq-dns" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.660570 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.671197 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.674003 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.802438 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.802502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.802604 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt9pg\" (UniqueName: \"kubernetes.io/projected/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-kube-api-access-nt9pg\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.904021 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.904078 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt9pg\" (UniqueName: \"kubernetes.io/projected/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-kube-api-access-nt9pg\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.904231 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.909966 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.913111 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.925975 
4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt9pg\" (UniqueName: \"kubernetes.io/projected/5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97-kube-api-access-nt9pg\") pod \"nova-cell1-novncproxy-0\" (UID: \"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:33 crc kubenswrapper[4852]: I0129 12:17:33.978732 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:34 crc kubenswrapper[4852]: I0129 12:17:34.431497 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 12:17:34 crc kubenswrapper[4852]: I0129 12:17:34.599324 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97","Type":"ContainerStarted","Data":"e66f083ed6b0ed663bec6747a60829d53c6e79cf10b9608189769a6998df0a71"} Jan 29 12:17:34 crc kubenswrapper[4852]: I0129 12:17:34.876274 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:17:34 crc kubenswrapper[4852]: I0129 12:17:34.877045 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="8634fab6-800d-4f0a-8c2f-4b22b89048ae" containerName="nova-cell0-conductor-conductor" containerID="cri-o://a40bffd2289531afe6b19a82ce979b4101f83c743a5faab56ca0481408e45396" gracePeriod=30 Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.417885 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.449879 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.481606 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="add26b63-e3cc-42bc-9e83-5769bf7caa4c" path="/var/lib/kubelet/pods/add26b63-e3cc-42bc-9e83-5769bf7caa4c/volumes" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.564788 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-config-data\") pod \"c52dd439-54a2-4094-bffa-42647f21c628\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.565243 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-config-data\") pod \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.565353 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrkwt\" (UniqueName: \"kubernetes.io/projected/c52dd439-54a2-4094-bffa-42647f21c628-kube-api-access-wrkwt\") pod \"c52dd439-54a2-4094-bffa-42647f21c628\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.565499 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-combined-ca-bundle\") pod \"c52dd439-54a2-4094-bffa-42647f21c628\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.565977 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-logs\") pod \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.566060 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-combined-ca-bundle\") pod \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.566105 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c52dd439-54a2-4094-bffa-42647f21c628-logs\") pod \"c52dd439-54a2-4094-bffa-42647f21c628\" (UID: \"c52dd439-54a2-4094-bffa-42647f21c628\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.566142 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnchw\" (UniqueName: \"kubernetes.io/projected/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-kube-api-access-rnchw\") pod \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\" (UID: \"ccb3371d-a45f-4aeb-8c60-e69b05c7da69\") " Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.568820 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-logs" (OuterVolumeSpecName: "logs") pod "ccb3371d-a45f-4aeb-8c60-e69b05c7da69" (UID: "ccb3371d-a45f-4aeb-8c60-e69b05c7da69"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.569724 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c52dd439-54a2-4094-bffa-42647f21c628-logs" (OuterVolumeSpecName: "logs") pod "c52dd439-54a2-4094-bffa-42647f21c628" (UID: "c52dd439-54a2-4094-bffa-42647f21c628"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.573948 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-kube-api-access-rnchw" (OuterVolumeSpecName: "kube-api-access-rnchw") pod "ccb3371d-a45f-4aeb-8c60-e69b05c7da69" (UID: "ccb3371d-a45f-4aeb-8c60-e69b05c7da69"). InnerVolumeSpecName "kube-api-access-rnchw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.589932 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c52dd439-54a2-4094-bffa-42647f21c628-kube-api-access-wrkwt" (OuterVolumeSpecName: "kube-api-access-wrkwt") pod "c52dd439-54a2-4094-bffa-42647f21c628" (UID: "c52dd439-54a2-4094-bffa-42647f21c628"). InnerVolumeSpecName "kube-api-access-wrkwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.597075 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c52dd439-54a2-4094-bffa-42647f21c628" (UID: "c52dd439-54a2-4094-bffa-42647f21c628"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.604787 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-config-data" (OuterVolumeSpecName: "config-data") pod "c52dd439-54a2-4094-bffa-42647f21c628" (UID: "c52dd439-54a2-4094-bffa-42647f21c628"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.611257 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ccb3371d-a45f-4aeb-8c60-e69b05c7da69" (UID: "ccb3371d-a45f-4aeb-8c60-e69b05c7da69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.611659 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-config-data" (OuterVolumeSpecName: "config-data") pod "ccb3371d-a45f-4aeb-8c60-e69b05c7da69" (UID: "ccb3371d-a45f-4aeb-8c60-e69b05c7da69"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.612076 4852 generic.go:334] "Generic (PLEG): container finished" podID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerID="81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0" exitCode=0 Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.612105 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ccb3371d-a45f-4aeb-8c60-e69b05c7da69","Type":"ContainerDied","Data":"81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0"} Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.612620 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ccb3371d-a45f-4aeb-8c60-e69b05c7da69","Type":"ContainerDied","Data":"6ef26556f2e77f0f1cd95b9583fd3fa4e7b8ad987b3793a311ed863542a4815e"} Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.612217 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.612734 4852 scope.go:117] "RemoveContainer" containerID="81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.622062 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97","Type":"ContainerStarted","Data":"09801bd1f2540ad22ee3b64b74bffd63e774b95b43c05739273f0cbf6bbb1f9b"} Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.643761 4852 generic.go:334] "Generic (PLEG): container finished" podID="c52dd439-54a2-4094-bffa-42647f21c628" containerID="f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f" exitCode=0 Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.643911 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c52dd439-54a2-4094-bffa-42647f21c628","Type":"ContainerDied","Data":"f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f"} Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.644365 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c52dd439-54a2-4094-bffa-42647f21c628","Type":"ContainerDied","Data":"401c28c289c192654252383f81d6f2a8fdf7b8faa7af5b035f02446d78f64bf6"} Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.644165 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.644531 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.644518196 podStartE2EDuration="2.644518196s" podCreationTimestamp="2026-01-29 12:17:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:35.643891641 +0000 UTC m=+5752.861222785" watchObservedRunningTime="2026-01-29 12:17:35.644518196 +0000 UTC m=+5752.861849330" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668126 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668148 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668160 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668169 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c52dd439-54a2-4094-bffa-42647f21c628-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668179 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnchw\" (UniqueName: \"kubernetes.io/projected/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-kube-api-access-rnchw\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668188 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c52dd439-54a2-4094-bffa-42647f21c628-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668196 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccb3371d-a45f-4aeb-8c60-e69b05c7da69-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.668204 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrkwt\" (UniqueName: \"kubernetes.io/projected/c52dd439-54a2-4094-bffa-42647f21c628-kube-api-access-wrkwt\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.740439 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.751898 4852 scope.go:117] "RemoveContainer" containerID="52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.763676 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.805656 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.814806 4852 scope.go:117] "RemoveContainer" containerID="81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0" Jan 29 
12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.817686 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:17:35 crc kubenswrapper[4852]: E0129 12:17:35.818695 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0\": container with ID starting with 81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0 not found: ID does not exist" containerID="81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.818740 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0"} err="failed to get container status \"81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0\": rpc error: code = NotFound desc = could not find container \"81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0\": container with ID starting with 81164744e6266352b3fc08e8f2252505480e33216fbcbc2d6f096bb817d1b1e0 not found: ID does not exist" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.818774 4852 scope.go:117] "RemoveContainer" containerID="52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755" Jan 29 12:17:35 crc kubenswrapper[4852]: E0129 12:17:35.826711 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755\": container with ID starting with 52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755 not found: ID does not exist" containerID="52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.826754 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755"} err="failed to get container status \"52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755\": rpc error: code = NotFound desc = could not find container \"52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755\": container with ID starting with 52ffd6cfa4bed3e00c33a0068dd61a4daa01f57e512584956c30bc41ea0a2755 not found: ID does not exist" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.826775 4852 scope.go:117] "RemoveContainer" containerID="f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.830649 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 12:17:35 crc kubenswrapper[4852]: E0129 12:17:35.831031 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-api" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831046 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-api" Jan 29 12:17:35 crc kubenswrapper[4852]: E0129 12:17:35.831057 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-log" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831063 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-log" Jan 29 
12:17:35 crc kubenswrapper[4852]: E0129 12:17:35.831087 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-log" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831095 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-log" Jan 29 12:17:35 crc kubenswrapper[4852]: E0129 12:17:35.831120 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-metadata" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831126 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-metadata" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831292 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-log" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831305 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-log" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831322 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52dd439-54a2-4094-bffa-42647f21c628" containerName="nova-metadata-metadata" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.831332 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" containerName="nova-api-api" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.832273 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.835950 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.867708 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.957774 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:17:35 crc kubenswrapper[4852]: I0129 12:17:35.963063 4852 scope.go:117] "RemoveContainer" containerID="75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.039960 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.047981 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7adc97e8-4d27-486c-820f-4b49d6fd095b-logs\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.048180 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.048210 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.048341 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5z4g\" (UniqueName: \"kubernetes.io/projected/7adc97e8-4d27-486c-820f-4b49d6fd095b-kube-api-access-w5z4g\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.048365 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-config-data\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.056257 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.094659 4852 scope.go:117] "RemoveContainer" containerID="f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f" Jan 29 12:17:36 crc kubenswrapper[4852]: E0129 12:17:36.095114 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f\": container with ID starting with f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f not found: ID does not exist" containerID="f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.095145 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f"} err="failed to get container status \"f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f\": rpc error: code = NotFound desc = could not find container \"f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f\": container with ID starting with f0d9f39e508e582d514a66e9453f88a925a6c441debe4f98301575059cf2ef6f not found: ID does not exist" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.095169 4852 scope.go:117] 
"RemoveContainer" containerID="75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9" Jan 29 12:17:36 crc kubenswrapper[4852]: E0129 12:17:36.095350 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9\": container with ID starting with 75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9 not found: ID does not exist" containerID="75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.095369 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9"} err="failed to get container status \"75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9\": rpc error: code = NotFound desc = could not find container \"75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9\": container with ID starting with 75058a17dc05d702a5eab0f70d915795623e753d79afe5d100ca7bffe87558e9 not found: ID does not exist" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150212 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b915d83-fdef-4d3a-8079-26b0cd0a956c-logs\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150288 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150323 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7adc97e8-4d27-486c-820f-4b49d6fd095b-logs\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150348 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-config-data\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150388 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150494 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5z4g\" (UniqueName: \"kubernetes.io/projected/7adc97e8-4d27-486c-820f-4b49d6fd095b-kube-api-access-w5z4g\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150524 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-config-data\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.150548 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb99w\" (UniqueName: \"kubernetes.io/projected/1b915d83-fdef-4d3a-8079-26b0cd0a956c-kube-api-access-rb99w\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.151066 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7adc97e8-4d27-486c-820f-4b49d6fd095b-logs\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.159942 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-config-data\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.168907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5z4g\" (UniqueName: \"kubernetes.io/projected/7adc97e8-4d27-486c-820f-4b49d6fd095b-kube-api-access-w5z4g\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.169400 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.251489 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb99w\" (UniqueName: \"kubernetes.io/projected/1b915d83-fdef-4d3a-8079-26b0cd0a956c-kube-api-access-rb99w\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.251546 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b915d83-fdef-4d3a-8079-26b0cd0a956c-logs\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.251576 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.251611 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-config-data\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.252378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/1b915d83-fdef-4d3a-8079-26b0cd0a956c-logs\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.255546 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-config-data\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.256768 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.274301 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb99w\" (UniqueName: \"kubernetes.io/projected/1b915d83-fdef-4d3a-8079-26b0cd0a956c-kube-api-access-rb99w\") pod \"nova-api-0\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.353508 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.371214 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.537126 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.556141 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-config-data\") pod \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.556312 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-combined-ca-bundle\") pod \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.556439 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcxcl\" (UniqueName: \"kubernetes.io/projected/cb9fc717-2f1e-48f4-b648-084429d4c3ef-kube-api-access-fcxcl\") pod \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\" (UID: \"cb9fc717-2f1e-48f4-b648-084429d4c3ef\") " Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.568789 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb9fc717-2f1e-48f4-b648-084429d4c3ef-kube-api-access-fcxcl" (OuterVolumeSpecName: "kube-api-access-fcxcl") pod "cb9fc717-2f1e-48f4-b648-084429d4c3ef" (UID: "cb9fc717-2f1e-48f4-b648-084429d4c3ef"). InnerVolumeSpecName "kube-api-access-fcxcl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.587746 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb9fc717-2f1e-48f4-b648-084429d4c3ef" (UID: "cb9fc717-2f1e-48f4-b648-084429d4c3ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.602283 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-config-data" (OuterVolumeSpecName: "config-data") pod "cb9fc717-2f1e-48f4-b648-084429d4c3ef" (UID: "cb9fc717-2f1e-48f4-b648-084429d4c3ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.678251 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcxcl\" (UniqueName: \"kubernetes.io/projected/cb9fc717-2f1e-48f4-b648-084429d4c3ef-kube-api-access-fcxcl\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.678283 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.678346 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb9fc717-2f1e-48f4-b648-084429d4c3ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.689414 4852 generic.go:334] "Generic (PLEG): container finished" podID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" exitCode=0 Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.689491 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb9fc717-2f1e-48f4-b648-084429d4c3ef","Type":"ContainerDied","Data":"ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc"} Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.689527 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cb9fc717-2f1e-48f4-b648-084429d4c3ef","Type":"ContainerDied","Data":"9dd3219acd74129cca02bb3dfe3722e707435afa6a848789678ecec0619cf161"} Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.689552 4852 scope.go:117] "RemoveContainer" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.689710 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.724655 4852 scope.go:117] "RemoveContainer" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" Jan 29 12:17:36 crc kubenswrapper[4852]: E0129 12:17:36.725435 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc\": container with ID starting with ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc not found: ID does not exist" containerID="ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.725479 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc"} err="failed to get container status \"ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc\": rpc error: code = NotFound desc = could not find container \"ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc\": container with ID starting with ef352f7c47026962c013a84ac520699526898035cff5e328564ada3a16944dcc not found: ID does not exist" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.778544 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.790608 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.807326 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: E0129 12:17:36.807756 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" containerName="nova-scheduler-scheduler" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.807769 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" containerName="nova-scheduler-scheduler" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.807966 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" containerName="nova-scheduler-scheduler" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.808562 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.816765 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.816861 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.867701 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.982878 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-config-data\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.983213 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:36 crc kubenswrapper[4852]: I0129 12:17:36.983242 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d9rn\" (UniqueName: \"kubernetes.io/projected/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-kube-api-access-2d9rn\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.084810 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-config-data\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.084875 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.084894 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d9rn\" (UniqueName: \"kubernetes.io/projected/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-kube-api-access-2d9rn\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.090041 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-config-data\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.091366 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: 
I0129 12:17:37.103173 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d9rn\" (UniqueName: \"kubernetes.io/projected/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-kube-api-access-2d9rn\") pod \"nova-scheduler-0\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.126125 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 12:17:37 crc kubenswrapper[4852]: W0129 12:17:37.131384 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b915d83_fdef_4d3a_8079_26b0cd0a956c.slice/crio-03b10d7a945657830360dc536f019bfaf055245a6d7e3f1542bdde06ccd21d43 WatchSource:0}: Error finding container 03b10d7a945657830360dc536f019bfaf055245a6d7e3f1542bdde06ccd21d43: Status 404 returned error can't find the container with id 03b10d7a945657830360dc536f019bfaf055245a6d7e3f1542bdde06ccd21d43 Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.134640 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.476684 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c52dd439-54a2-4094-bffa-42647f21c628" path="/var/lib/kubelet/pods/c52dd439-54a2-4094-bffa-42647f21c628/volumes" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.477916 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb9fc717-2f1e-48f4-b648-084429d4c3ef" path="/var/lib/kubelet/pods/cb9fc717-2f1e-48f4-b648-084429d4c3ef/volumes" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.478527 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccb3371d-a45f-4aeb-8c60-e69b05c7da69" path="/var/lib/kubelet/pods/ccb3371d-a45f-4aeb-8c60-e69b05c7da69/volumes" Jan 29 12:17:37 crc kubenswrapper[4852]: E0129 12:17:37.709407 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8 is running failed: container process not found" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 12:17:37 crc kubenswrapper[4852]: E0129 12:17:37.710258 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8 is running failed: container process not found" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 12:17:37 crc kubenswrapper[4852]: E0129 12:17:37.710569 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8 is running failed: container process not found" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 12:17:37 crc kubenswrapper[4852]: E0129 12:17:37.710628 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerName="nova-cell1-conductor-conductor" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.719818 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b915d83-fdef-4d3a-8079-26b0cd0a956c","Type":"ContainerStarted","Data":"617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.719871 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b915d83-fdef-4d3a-8079-26b0cd0a956c","Type":"ContainerStarted","Data":"03b10d7a945657830360dc536f019bfaf055245a6d7e3f1542bdde06ccd21d43"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.724749 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7adc97e8-4d27-486c-820f-4b49d6fd095b","Type":"ContainerStarted","Data":"d0e9c5f01424616b0bfadafd00ea22e49f5c98bb771f69a655780f61ba81c303"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.724794 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7adc97e8-4d27-486c-820f-4b49d6fd095b","Type":"ContainerStarted","Data":"9b4f777a246caab0b8394d8c852f616f25a1e6a20801f90e5a01b52249926ed4"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.724808 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7adc97e8-4d27-486c-820f-4b49d6fd095b","Type":"ContainerStarted","Data":"e2f1d19d2f18cbbdb942f3fd9f18c75322dcd5c3487f9e628e05a6a54559c270"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.734155 4852 generic.go:334] "Generic (PLEG): container finished" podID="8634fab6-800d-4f0a-8c2f-4b22b89048ae" containerID="a40bffd2289531afe6b19a82ce979b4101f83c743a5faab56ca0481408e45396" exitCode=0 Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.734219 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8634fab6-800d-4f0a-8c2f-4b22b89048ae","Type":"ContainerDied","Data":"a40bffd2289531afe6b19a82ce979b4101f83c743a5faab56ca0481408e45396"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.741389 4852 generic.go:334] "Generic (PLEG): container finished" podID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" exitCode=0 Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.741424 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8c5f61b-27a7-4756-996f-b58b641a2ebc","Type":"ContainerDied","Data":"b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8"} Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.751089 4852 util.go:48] "No ready sandbox for pod can be found. 
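The ExecSync failures above come from a readiness probe that runs /usr/bin/pgrep -r DRST nova-conductor inside the conductor container; once the container process is gone, the runtime can no longer exec the probe command and reports "container process not found". Purely as an illustration (run directly on a host rather than through the CRI, with an arbitrary 3-second deadline standing in for the probe timeout), the same check looks like this in Go:

package main

import (
    "context"
    "errors"
    "fmt"
    "os/exec"
    "time"
)

func main() {
    // Same command the readiness probe above executes; the deadline is a stand-in,
    // not a value taken from this log.
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    cmd := exec.CommandContext(ctx, "/usr/bin/pgrep", "-r", "DRST", "nova-conductor")
    out, err := cmd.Output()
    var exitErr *exec.ExitError
    switch {
    case errors.Is(ctx.Err(), context.DeadlineExceeded):
        fmt.Println("probe timed out")
    case errors.As(err, &exitErr):
        fmt.Printf("probe failed with exit status %d (e.g. no matching process)\n", exitErr.ExitCode())
    case err != nil:
        fmt.Printf("probe could not run: %v\n", err)
    default:
        fmt.Printf("probe ok, matching PIDs:\n%s", out)
    }
}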
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.765595 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.765547589 podStartE2EDuration="2.765547589s" podCreationTimestamp="2026-01-29 12:17:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:37.74631415 +0000 UTC m=+5754.963645304" watchObservedRunningTime="2026-01-29 12:17:37.765547589 +0000 UTC m=+5754.982878723" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.868270 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.878946 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.907157 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-config-data\") pod \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.907670 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-combined-ca-bundle\") pod \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.907750 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bkft\" (UniqueName: \"kubernetes.io/projected/b8c5f61b-27a7-4756-996f-b58b641a2ebc-kube-api-access-2bkft\") pod \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\" (UID: \"b8c5f61b-27a7-4756-996f-b58b641a2ebc\") " Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.912883 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8c5f61b-27a7-4756-996f-b58b641a2ebc-kube-api-access-2bkft" (OuterVolumeSpecName: "kube-api-access-2bkft") pod "b8c5f61b-27a7-4756-996f-b58b641a2ebc" (UID: "b8c5f61b-27a7-4756-996f-b58b641a2ebc"). InnerVolumeSpecName "kube-api-access-2bkft". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.940345 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8c5f61b-27a7-4756-996f-b58b641a2ebc" (UID: "b8c5f61b-27a7-4756-996f-b58b641a2ebc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:37 crc kubenswrapper[4852]: I0129 12:17:37.953526 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-config-data" (OuterVolumeSpecName: "config-data") pod "b8c5f61b-27a7-4756-996f-b58b641a2ebc" (UID: "b8c5f61b-27a7-4756-996f-b58b641a2ebc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.009295 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njf9q\" (UniqueName: \"kubernetes.io/projected/8634fab6-800d-4f0a-8c2f-4b22b89048ae-kube-api-access-njf9q\") pod \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.009373 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-combined-ca-bundle\") pod \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.009408 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-config-data\") pod \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\" (UID: \"8634fab6-800d-4f0a-8c2f-4b22b89048ae\") " Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.009966 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.009989 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8c5f61b-27a7-4756-996f-b58b641a2ebc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.010002 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bkft\" (UniqueName: \"kubernetes.io/projected/b8c5f61b-27a7-4756-996f-b58b641a2ebc-kube-api-access-2bkft\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.012358 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8634fab6-800d-4f0a-8c2f-4b22b89048ae-kube-api-access-njf9q" (OuterVolumeSpecName: "kube-api-access-njf9q") pod "8634fab6-800d-4f0a-8c2f-4b22b89048ae" (UID: "8634fab6-800d-4f0a-8c2f-4b22b89048ae"). InnerVolumeSpecName "kube-api-access-njf9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.036894 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8634fab6-800d-4f0a-8c2f-4b22b89048ae" (UID: "8634fab6-800d-4f0a-8c2f-4b22b89048ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.039690 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-config-data" (OuterVolumeSpecName: "config-data") pod "8634fab6-800d-4f0a-8c2f-4b22b89048ae" (UID: "8634fab6-800d-4f0a-8c2f-4b22b89048ae"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.111882 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njf9q\" (UniqueName: \"kubernetes.io/projected/8634fab6-800d-4f0a-8c2f-4b22b89048ae-kube-api-access-njf9q\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.111928 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.111944 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8634fab6-800d-4f0a-8c2f-4b22b89048ae-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.752690 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b915d83-fdef-4d3a-8079-26b0cd0a956c","Type":"ContainerStarted","Data":"4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a"} Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.756285 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3f441bde-6fef-42f7-8a6d-9cd7eceb019e","Type":"ContainerStarted","Data":"ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d"} Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.756347 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3f441bde-6fef-42f7-8a6d-9cd7eceb019e","Type":"ContainerStarted","Data":"968f6727dfec0caa675fb3c89cad632d351db65c3ef86a7cb6ec11a8149a8d5d"} Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.758076 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.758076 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8634fab6-800d-4f0a-8c2f-4b22b89048ae","Type":"ContainerDied","Data":"7779d743bca3a3e442dc94747bf4542fc7e7e7416909708c0ab646e5aa5d3009"} Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.758199 4852 scope.go:117] "RemoveContainer" containerID="a40bffd2289531afe6b19a82ce979b4101f83c743a5faab56ca0481408e45396" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.778330 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.778313915 podStartE2EDuration="3.778313915s" podCreationTimestamp="2026-01-29 12:17:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:38.770034184 +0000 UTC m=+5755.987365318" watchObservedRunningTime="2026-01-29 12:17:38.778313915 +0000 UTC m=+5755.995645049" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.782949 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.783042 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8c5f61b-27a7-4756-996f-b58b641a2ebc","Type":"ContainerDied","Data":"9f75cc94b449505d26441d8e1ea3e3846db37769cdd558463290c7bfb9857edb"} Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.790457 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.790440482 podStartE2EDuration="2.790440482s" podCreationTimestamp="2026-01-29 12:17:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:38.784916787 +0000 UTC m=+5756.002247921" watchObservedRunningTime="2026-01-29 12:17:38.790440482 +0000 UTC m=+5756.007771606" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.802535 4852 scope.go:117] "RemoveContainer" containerID="b1cce45f1f8585bbb857c5bdaf6fa1730b01f5b5d0ddc40a9f89d309762beec8" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.824275 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.843224 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.859379 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: E0129 12:17:38.859830 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8634fab6-800d-4f0a-8c2f-4b22b89048ae" containerName="nova-cell0-conductor-conductor" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.859859 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8634fab6-800d-4f0a-8c2f-4b22b89048ae" containerName="nova-cell0-conductor-conductor" Jan 29 12:17:38 crc kubenswrapper[4852]: E0129 12:17:38.859900 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerName="nova-cell1-conductor-conductor" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.859912 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerName="nova-cell1-conductor-conductor" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.860105 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8634fab6-800d-4f0a-8c2f-4b22b89048ae" containerName="nova-cell0-conductor-conductor" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.860136 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" containerName="nova-cell1-conductor-conductor" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.861084 4852 util.go:30] "No sandbox for pod can be found. 
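In the pod_startup_latency_tracker entries above, podStartSLOduration is exactly watchObservedRunningTime minus podCreationTimestamp: 2.765547589s for nova-metadata-0, 3.778313915s for nova-api-0, and 2.790440482s for nova-scheduler-0 (the pulling timestamps are the Go zero time, i.e. no image pull was recorded). The arithmetic can be checked directly with the timestamps copied from the nova-metadata-0 entry:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Timestamps copied verbatim from the nova-metadata-0 startup entry above.
    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    created, err := time.Parse(layout, "2026-01-29 12:17:35 +0000 UTC")
    if err != nil {
        panic(err)
    }
    observed, err := time.Parse(layout, "2026-01-29 12:17:37.765547589 +0000 UTC")
    if err != nil {
        panic(err)
    }
    // Prints 2.765547589s, matching podStartSLOduration in the log entry.
    fmt.Println(observed.Sub(created))
}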
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.873451 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.874056 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.900873 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.907871 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.916123 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.917433 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.919404 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.938333 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:17:38 crc kubenswrapper[4852]: I0129 12:17:38.980107 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.028580 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.028757 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.028808 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.028846 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kgpw\" (UniqueName: \"kubernetes.io/projected/e607ecc3-25cf-4681-85cc-26f1634e8cdb-kube-api-access-4kgpw\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.028863 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 
12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.029771 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm4tz\" (UniqueName: \"kubernetes.io/projected/66a97aee-41b4-454c-a584-9f0195bce766-kube-api-access-sm4tz\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.131447 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm4tz\" (UniqueName: \"kubernetes.io/projected/66a97aee-41b4-454c-a584-9f0195bce766-kube-api-access-sm4tz\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.131530 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.131642 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.131692 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.131731 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kgpw\" (UniqueName: \"kubernetes.io/projected/e607ecc3-25cf-4681-85cc-26f1634e8cdb-kube-api-access-4kgpw\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.131756 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.138617 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.141831 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.152981 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sm4tz\" (UniqueName: \"kubernetes.io/projected/66a97aee-41b4-454c-a584-9f0195bce766-kube-api-access-sm4tz\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.155088 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.158024 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kgpw\" (UniqueName: \"kubernetes.io/projected/e607ecc3-25cf-4681-85cc-26f1634e8cdb-kube-api-access-4kgpw\") pod \"nova-cell1-conductor-0\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.165731 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.191374 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.237926 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.463836 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:17:39 crc kubenswrapper[4852]: E0129 12:17:39.464609 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.474770 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8634fab6-800d-4f0a-8c2f-4b22b89048ae" path="/var/lib/kubelet/pods/8634fab6-800d-4f0a-8c2f-4b22b89048ae/volumes" Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.475697 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8c5f61b-27a7-4756-996f-b58b641a2ebc" path="/var/lib/kubelet/pods/b8c5f61b-27a7-4756-996f-b58b641a2ebc/volumes" Jan 29 12:17:39 crc kubenswrapper[4852]: W0129 12:17:39.642358 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66a97aee_41b4_454c_a584_9f0195bce766.slice/crio-a8367f59189b421e33b330cb2134bc2c72e2e2c820769783be595eb5fd33393b WatchSource:0}: Error finding container a8367f59189b421e33b330cb2134bc2c72e2e2c820769783be595eb5fd33393b: Status 404 returned error can't find the container with id a8367f59189b421e33b330cb2134bc2c72e2e2c820769783be595eb5fd33393b Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.647114 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell0-conductor-0"] Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.777017 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 12:17:39 crc kubenswrapper[4852]: W0129 12:17:39.781991 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode607ecc3_25cf_4681_85cc_26f1634e8cdb.slice/crio-7e09026c9dd62eea31a90beb318a5b0bf725149139eecf61110b83d299965584 WatchSource:0}: Error finding container 7e09026c9dd62eea31a90beb318a5b0bf725149139eecf61110b83d299965584: Status 404 returned error can't find the container with id 7e09026c9dd62eea31a90beb318a5b0bf725149139eecf61110b83d299965584 Jan 29 12:17:39 crc kubenswrapper[4852]: I0129 12:17:39.797733 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"66a97aee-41b4-454c-a584-9f0195bce766","Type":"ContainerStarted","Data":"a8367f59189b421e33b330cb2134bc2c72e2e2c820769783be595eb5fd33393b"} Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.819544 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e607ecc3-25cf-4681-85cc-26f1634e8cdb","Type":"ContainerStarted","Data":"02fd37a25c66dbf7adaf56353b54e230c1f14f0e00caef2447ef34c4fd1f6452"} Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.820446 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.820678 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e607ecc3-25cf-4681-85cc-26f1634e8cdb","Type":"ContainerStarted","Data":"7e09026c9dd62eea31a90beb318a5b0bf725149139eecf61110b83d299965584"} Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.822100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"66a97aee-41b4-454c-a584-9f0195bce766","Type":"ContainerStarted","Data":"020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa"} Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.823490 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.850546 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.8505230580000003 podStartE2EDuration="2.850523058s" podCreationTimestamp="2026-01-29 12:17:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:40.834956108 +0000 UTC m=+5758.052287252" watchObservedRunningTime="2026-01-29 12:17:40.850523058 +0000 UTC m=+5758.067854202" Jan 29 12:17:40 crc kubenswrapper[4852]: I0129 12:17:40.865436 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.865421381 podStartE2EDuration="2.865421381s" podCreationTimestamp="2026-01-29 12:17:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:40.857330294 +0000 UTC m=+5758.074661438" watchObservedRunningTime="2026-01-29 12:17:40.865421381 +0000 UTC m=+5758.082752515" Jan 29 12:17:41 crc kubenswrapper[4852]: I0129 12:17:41.371777 4852 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:17:41 crc kubenswrapper[4852]: I0129 12:17:41.372062 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 12:17:42 crc kubenswrapper[4852]: I0129 12:17:42.136048 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 12:17:43 crc kubenswrapper[4852]: I0129 12:17:43.980551 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:43 crc kubenswrapper[4852]: I0129 12:17:43.994021 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:44 crc kubenswrapper[4852]: I0129 12:17:44.218340 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 12:17:44 crc kubenswrapper[4852]: I0129 12:17:44.280353 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 12:17:44 crc kubenswrapper[4852]: I0129 12:17:44.881317 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 29 12:17:46 crc kubenswrapper[4852]: I0129 12:17:46.372100 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 12:17:46 crc kubenswrapper[4852]: I0129 12:17:46.372172 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 12:17:46 crc kubenswrapper[4852]: I0129 12:17:46.537982 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 12:17:46 crc kubenswrapper[4852]: I0129 12:17:46.538062 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.136160 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.166162 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.414216 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.414257 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.620823 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.82:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.620818 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" 
podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.82:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:17:47 crc kubenswrapper[4852]: I0129 12:17:47.940968 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.069804 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.071541 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.074498 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.091892 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.174903 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-scripts\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.175020 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.175066 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lvgd\" (UniqueName: \"kubernetes.io/projected/bce8da7e-64df-4d22-a514-4a3fd7527813-kube-api-access-8lvgd\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.175750 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.175841 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce8da7e-64df-4d22-a514-4a3fd7527813-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.175883 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.277600 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lvgd\" (UniqueName: 
\"kubernetes.io/projected/bce8da7e-64df-4d22-a514-4a3fd7527813-kube-api-access-8lvgd\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.277677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.277739 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce8da7e-64df-4d22-a514-4a3fd7527813-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.277780 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.277822 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-scripts\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.277904 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.278019 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce8da7e-64df-4d22-a514-4a3fd7527813-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.284442 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.284997 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.292105 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-scripts\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.295163 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.299183 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lvgd\" (UniqueName: \"kubernetes.io/projected/bce8da7e-64df-4d22-a514-4a3fd7527813-kube-api-access-8lvgd\") pod \"cinder-scheduler-0\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.403294 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.867758 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:17:50 crc kubenswrapper[4852]: W0129 12:17:50.880092 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbce8da7e_64df_4d22_a514_4a3fd7527813.slice/crio-421516ba54f2c24d559b55dc17bd4b5241e3a121a184f4b7841d07ca93ff9922 WatchSource:0}: Error finding container 421516ba54f2c24d559b55dc17bd4b5241e3a121a184f4b7841d07ca93ff9922: Status 404 returned error can't find the container with id 421516ba54f2c24d559b55dc17bd4b5241e3a121a184f4b7841d07ca93ff9922 Jan 29 12:17:50 crc kubenswrapper[4852]: I0129 12:17:50.963829 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bce8da7e-64df-4d22-a514-4a3fd7527813","Type":"ContainerStarted","Data":"421516ba54f2c24d559b55dc17bd4b5241e3a121a184f4b7841d07ca93ff9922"} Jan 29 12:17:51 crc kubenswrapper[4852]: I0129 12:17:51.799028 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:51 crc kubenswrapper[4852]: I0129 12:17:51.800154 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api-log" containerID="cri-o://1b330a5a09461ed1c44cd8950b2c57ef4ae2053e02d2d8aeb68f3deb078dca06" gracePeriod=30 Jan 29 12:17:51 crc kubenswrapper[4852]: I0129 12:17:51.800559 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api" containerID="cri-o://c1fd62853b57ad8f7bd89e942353cfd657bb5fdabcfaaf5daa691dc3494cd8aa" gracePeriod=30 Jan 29 12:17:51 crc kubenswrapper[4852]: I0129 12:17:51.987245 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bce8da7e-64df-4d22-a514-4a3fd7527813","Type":"ContainerStarted","Data":"00df46d30f2dec062835699d53703fce86a077ba5310636fe053a7af565d563f"} Jan 29 12:17:51 crc kubenswrapper[4852]: I0129 12:17:51.988957 4852 generic.go:334] "Generic (PLEG): container finished" podID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerID="1b330a5a09461ed1c44cd8950b2c57ef4ae2053e02d2d8aeb68f3deb078dca06" exitCode=143 Jan 29 12:17:51 crc kubenswrapper[4852]: I0129 12:17:51.988982 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"806efd2e-156e-407f-a9f9-26eee522e9ef","Type":"ContainerDied","Data":"1b330a5a09461ed1c44cd8950b2c57ef4ae2053e02d2d8aeb68f3deb078dca06"} Jan 29 12:17:52 crc 
kubenswrapper[4852]: I0129 12:17:52.331799 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.334092 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.349980 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.350542 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425043 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425108 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425183 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/396524c3-a9fc-4041-a7bf-86f088a7b1ea-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425214 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbk7s\" (UniqueName: \"kubernetes.io/projected/396524c3-a9fc-4041-a7bf-86f088a7b1ea-kube-api-access-vbk7s\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425264 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425379 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-run\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425427 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425458 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-sys\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425516 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425554 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425662 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425722 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425788 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425819 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425853 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.425955 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-dev\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.463484 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:17:52 
crc kubenswrapper[4852]: E0129 12:17:52.463897 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528195 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-run\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528472 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528572 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-sys\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528652 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-sys\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528406 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-run\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528621 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528881 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.528973 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529046 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529070 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529177 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529298 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529344 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529440 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-dev\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529502 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529553 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529671 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " 
pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529743 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/396524c3-a9fc-4041-a7bf-86f088a7b1ea-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbk7s\" (UniqueName: \"kubernetes.io/projected/396524c3-a9fc-4041-a7bf-86f088a7b1ea-kube-api-access-vbk7s\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529845 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.529849 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.530047 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.530141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.530243 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-dev\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.530298 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/396524c3-a9fc-4041-a7bf-86f088a7b1ea-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.534869 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.535453 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.546695 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/396524c3-a9fc-4041-a7bf-86f088a7b1ea-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.547440 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.551828 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/396524c3-a9fc-4041-a7bf-86f088a7b1ea-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.559908 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbk7s\" (UniqueName: \"kubernetes.io/projected/396524c3-a9fc-4041-a7bf-86f088a7b1ea-kube-api-access-vbk7s\") pod \"cinder-volume-volume1-0\" (UID: \"396524c3-a9fc-4041-a7bf-86f088a7b1ea\") " pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.659999 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.882562 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.884441 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.887940 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.909423 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 29 12:17:52 crc kubenswrapper[4852]: I0129 12:17:52.998236 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bce8da7e-64df-4d22-a514-4a3fd7527813","Type":"ContainerStarted","Data":"fab847c52218d5cbd59c21f12ec41422b8e5377ace09fcee3e01fd4b034446f8"} Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.027146 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.027117473 podStartE2EDuration="3.027117473s" podCreationTimestamp="2026-01-29 12:17:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:53.022161421 +0000 UTC m=+5770.239492555" watchObservedRunningTime="2026-01-29 12:17:53.027117473 +0000 UTC m=+5770.244448607" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047758 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/65005606-1bf1-42ed-99fd-afc7949c61a0-ceph\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047827 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-sys\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047844 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkncv\" (UniqueName: \"kubernetes.io/projected/65005606-1bf1-42ed-99fd-afc7949c61a0-kube-api-access-mkncv\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047869 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047884 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047897 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-dev\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047924 4852 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047944 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-lib-modules\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047977 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.047993 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.048007 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-config-data\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.048026 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.048043 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-scripts\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.048064 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-run\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.048094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-config-data-custom\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.048111 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: 
\"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-nvme\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.149834 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-run\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.149902 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-config-data-custom\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.149924 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-nvme\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.149990 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/65005606-1bf1-42ed-99fd-afc7949c61a0-ceph\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150043 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-sys\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150060 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkncv\" (UniqueName: \"kubernetes.io/projected/65005606-1bf1-42ed-99fd-afc7949c61a0-kube-api-access-mkncv\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150102 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150126 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150145 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-dev\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150170 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150204 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-lib-modules\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150245 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150283 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150300 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-config-data\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150330 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.150352 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-scripts\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.151310 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-run\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.151785 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.151846 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-nvme\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.153480 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: 
\"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.153525 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-dev\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.155694 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-lib-modules\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.156071 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-sys\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.156141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.156182 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.156237 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/65005606-1bf1-42ed-99fd-afc7949c61a0-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.157309 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-config-data\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.157641 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/65005606-1bf1-42ed-99fd-afc7949c61a0-ceph\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.158208 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.159519 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-config-data-custom\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.159531 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65005606-1bf1-42ed-99fd-afc7949c61a0-scripts\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.171605 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkncv\" (UniqueName: \"kubernetes.io/projected/65005606-1bf1-42ed-99fd-afc7949c61a0-kube-api-access-mkncv\") pod \"cinder-backup-0\" (UID: \"65005606-1bf1-42ed-99fd-afc7949c61a0\") " pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.245635 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.266489 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Jan 29 12:17:53 crc kubenswrapper[4852]: W0129 12:17:53.274329 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod396524c3_a9fc_4041_a7bf_86f088a7b1ea.slice/crio-9fcb1a2671f192bdc947f1035a41621afe93b14382fd8f35ca1381cb3564f6a2 WatchSource:0}: Error finding container 9fcb1a2671f192bdc947f1035a41621afe93b14382fd8f35ca1381cb3564f6a2: Status 404 returned error can't find the container with id 9fcb1a2671f192bdc947f1035a41621afe93b14382fd8f35ca1381cb3564f6a2 Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.279448 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:17:53 crc kubenswrapper[4852]: I0129 12:17:53.754364 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 29 12:17:54 crc kubenswrapper[4852]: I0129 12:17:54.009791 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"65005606-1bf1-42ed-99fd-afc7949c61a0","Type":"ContainerStarted","Data":"44af59c93f305f9abe2d2553c47b1493435840dc16bf1a9c1fdf291096925d26"} Jan 29 12:17:54 crc kubenswrapper[4852]: I0129 12:17:54.011279 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"396524c3-a9fc-4041-a7bf-86f088a7b1ea","Type":"ContainerStarted","Data":"9fcb1a2671f192bdc947f1035a41621afe93b14382fd8f35ca1381cb3564f6a2"} Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.040093 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"65005606-1bf1-42ed-99fd-afc7949c61a0","Type":"ContainerStarted","Data":"061a2921dc2973114b1952f9bc09737d136c1db86d8af2faa624ad499aa1823b"} Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.040637 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"65005606-1bf1-42ed-99fd-afc7949c61a0","Type":"ContainerStarted","Data":"507728f3d1bb8406e7e1accde565dbf2bec57cf7883c3cc6cf5dee1847243e4a"} Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.045183 4852 generic.go:334] "Generic (PLEG): container finished" podID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerID="c1fd62853b57ad8f7bd89e942353cfd657bb5fdabcfaaf5daa691dc3494cd8aa" exitCode=0 
Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.045277 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"806efd2e-156e-407f-a9f9-26eee522e9ef","Type":"ContainerDied","Data":"c1fd62853b57ad8f7bd89e942353cfd657bb5fdabcfaaf5daa691dc3494cd8aa"} Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.047259 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"396524c3-a9fc-4041-a7bf-86f088a7b1ea","Type":"ContainerStarted","Data":"1ae48f48904f4e24d2c7062ae9d1a8edb206988306dac1d7ef2803666914dd88"} Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.047316 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"396524c3-a9fc-4041-a7bf-86f088a7b1ea","Type":"ContainerStarted","Data":"ca198c9cc6e503a10aa4341ab18bc81cd012981e5c865a37939b4973a97db7bc"} Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.068069 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.348090146 podStartE2EDuration="3.068048622s" podCreationTimestamp="2026-01-29 12:17:52 +0000 UTC" firstStartedPulling="2026-01-29 12:17:53.758725274 +0000 UTC m=+5770.976056408" lastFinishedPulling="2026-01-29 12:17:54.47868375 +0000 UTC m=+5771.696014884" observedRunningTime="2026-01-29 12:17:55.062346723 +0000 UTC m=+5772.279677877" watchObservedRunningTime="2026-01-29 12:17:55.068048622 +0000 UTC m=+5772.285379756" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.088683 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=2.325659178 podStartE2EDuration="3.088661584s" podCreationTimestamp="2026-01-29 12:17:52 +0000 UTC" firstStartedPulling="2026-01-29 12:17:53.278893933 +0000 UTC m=+5770.496225067" lastFinishedPulling="2026-01-29 12:17:54.041896329 +0000 UTC m=+5771.259227473" observedRunningTime="2026-01-29 12:17:55.079891181 +0000 UTC m=+5772.297222315" watchObservedRunningTime="2026-01-29 12:17:55.088661584 +0000 UTC m=+5772.305992728" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.353845 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.403883 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.497820 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.497888 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-scripts\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.498059 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data-custom\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.498289 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-combined-ca-bundle\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.498381 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tr9t\" (UniqueName: \"kubernetes.io/projected/806efd2e-156e-407f-a9f9-26eee522e9ef-kube-api-access-7tr9t\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.498839 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/806efd2e-156e-407f-a9f9-26eee522e9ef-etc-machine-id\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.498895 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/806efd2e-156e-407f-a9f9-26eee522e9ef-logs\") pod \"806efd2e-156e-407f-a9f9-26eee522e9ef\" (UID: \"806efd2e-156e-407f-a9f9-26eee522e9ef\") " Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.499881 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/806efd2e-156e-407f-a9f9-26eee522e9ef-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.500808 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/806efd2e-156e-407f-a9f9-26eee522e9ef-logs" (OuterVolumeSpecName: "logs") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.506756 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.508383 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-scripts" (OuterVolumeSpecName: "scripts") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.519891 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/806efd2e-156e-407f-a9f9-26eee522e9ef-kube-api-access-7tr9t" (OuterVolumeSpecName: "kube-api-access-7tr9t") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "kube-api-access-7tr9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.531848 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.554317 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data" (OuterVolumeSpecName: "config-data") pod "806efd2e-156e-407f-a9f9-26eee522e9ef" (UID: "806efd2e-156e-407f-a9f9-26eee522e9ef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600687 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600733 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tr9t\" (UniqueName: \"kubernetes.io/projected/806efd2e-156e-407f-a9f9-26eee522e9ef-kube-api-access-7tr9t\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600749 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/806efd2e-156e-407f-a9f9-26eee522e9ef-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600765 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/806efd2e-156e-407f-a9f9-26eee522e9ef-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600777 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600788 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:55 crc kubenswrapper[4852]: I0129 12:17:55.600799 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/806efd2e-156e-407f-a9f9-26eee522e9ef-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.059453 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"806efd2e-156e-407f-a9f9-26eee522e9ef","Type":"ContainerDied","Data":"ca757aa4e6179e4657268faf1a1cadae8cd38eb93b25d86f4820d6c5f989bcc4"} Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.059560 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.059874 4852 scope.go:117] "RemoveContainer" containerID="c1fd62853b57ad8f7bd89e942353cfd657bb5fdabcfaaf5daa691dc3494cd8aa" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.108436 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.110343 4852 scope.go:117] "RemoveContainer" containerID="1b330a5a09461ed1c44cd8950b2c57ef4ae2053e02d2d8aeb68f3deb078dca06" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.120441 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.133922 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:56 crc kubenswrapper[4852]: E0129 12:17:56.134384 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api-log" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.134403 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api-log" Jan 29 12:17:56 crc kubenswrapper[4852]: E0129 12:17:56.134439 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.134449 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.134689 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api-log" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.134706 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.136002 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.139956 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.150872 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.314890 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/664f10e3-cc63-4eb5-8474-0dec1f35b938-logs\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.315371 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/664f10e3-cc63-4eb5-8474-0dec1f35b938-etc-machine-id\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.315455 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-config-data-custom\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.315553 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-config-data\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.315793 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.315887 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-scripts\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.316123 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmd9k\" (UniqueName: \"kubernetes.io/projected/664f10e3-cc63-4eb5-8474-0dec1f35b938-kube-api-access-vmd9k\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.374905 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.377353 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.378682 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.419902 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/664f10e3-cc63-4eb5-8474-0dec1f35b938-logs\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.419959 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/664f10e3-cc63-4eb5-8474-0dec1f35b938-etc-machine-id\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.419995 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-config-data-custom\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.420042 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-config-data\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.420100 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.420171 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-scripts\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.420242 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmd9k\" (UniqueName: \"kubernetes.io/projected/664f10e3-cc63-4eb5-8474-0dec1f35b938-kube-api-access-vmd9k\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.421741 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/664f10e3-cc63-4eb5-8474-0dec1f35b938-logs\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.421800 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/664f10e3-cc63-4eb5-8474-0dec1f35b938-etc-machine-id\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.427820 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-config-data\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.429422 4852 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.438649 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-scripts\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.438915 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/664f10e3-cc63-4eb5-8474-0dec1f35b938-config-data-custom\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.439791 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmd9k\" (UniqueName: \"kubernetes.io/projected/664f10e3-cc63-4eb5-8474-0dec1f35b938-kube-api-access-vmd9k\") pod \"cinder-api-0\" (UID: \"664f10e3-cc63-4eb5-8474-0dec1f35b938\") " pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.460565 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.554211 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.555654 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.556150 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.562123 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 12:17:56 crc kubenswrapper[4852]: I0129 12:17:56.984647 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 12:17:57 crc kubenswrapper[4852]: I0129 12:17:57.073722 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"664f10e3-cc63-4eb5-8474-0dec1f35b938","Type":"ContainerStarted","Data":"2dd1454f9cf0242cc574313238c6f316f981395902fbcc4f66eb7a5043961c28"} Jan 29 12:17:57 crc kubenswrapper[4852]: I0129 12:17:57.074624 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 12:17:57 crc kubenswrapper[4852]: I0129 12:17:57.080893 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 12:17:57 crc kubenswrapper[4852]: I0129 12:17:57.082010 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 12:17:57 crc kubenswrapper[4852]: I0129 12:17:57.504891 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" path="/var/lib/kubelet/pods/806efd2e-156e-407f-a9f9-26eee522e9ef/volumes" Jan 29 12:17:57 crc kubenswrapper[4852]: I0129 12:17:57.660675 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Jan 29 12:17:58 crc kubenswrapper[4852]: I0129 12:17:58.087282 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"664f10e3-cc63-4eb5-8474-0dec1f35b938","Type":"ContainerStarted","Data":"d16145524c48d803799ac2ca3feb8782605f975a4ea1e3d94e2c6faee5107cb4"} Jan 29 12:17:58 crc kubenswrapper[4852]: I0129 12:17:58.245870 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Jan 29 12:17:59 crc kubenswrapper[4852]: I0129 12:17:59.096346 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"664f10e3-cc63-4eb5-8474-0dec1f35b938","Type":"ContainerStarted","Data":"79fb8af0d9e30b5c039d9cd3a6056e9bc9528ba6c88fcea4ad6eb433b72154cd"} Jan 29 12:17:59 crc kubenswrapper[4852]: I0129 12:17:59.135289 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.135268954 podStartE2EDuration="3.135268954s" podCreationTimestamp="2026-01-29 12:17:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:17:59.116306911 +0000 UTC m=+5776.333638065" watchObservedRunningTime="2026-01-29 12:17:59.135268954 +0000 UTC m=+5776.352600098" Jan 29 12:18:00 crc kubenswrapper[4852]: I0129 12:18:00.105978 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 12:18:00 crc kubenswrapper[4852]: I0129 12:18:00.308321 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="806efd2e-156e-407f-a9f9-26eee522e9ef" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.1.80:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:18:00 crc kubenswrapper[4852]: I0129 12:18:00.627009 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 12:18:00 crc kubenswrapper[4852]: I0129 12:18:00.677444 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:18:01 crc kubenswrapper[4852]: I0129 12:18:01.115220 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="cinder-scheduler" containerID="cri-o://00df46d30f2dec062835699d53703fce86a077ba5310636fe053a7af565d563f" gracePeriod=30 Jan 29 12:18:01 crc kubenswrapper[4852]: I0129 12:18:01.115363 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="probe" containerID="cri-o://fab847c52218d5cbd59c21f12ec41422b8e5377ace09fcee3e01fd4b034446f8" gracePeriod=30 Jan 29 12:18:02 crc kubenswrapper[4852]: I0129 12:18:02.132304 4852 generic.go:334] "Generic (PLEG): container finished" podID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerID="fab847c52218d5cbd59c21f12ec41422b8e5377ace09fcee3e01fd4b034446f8" exitCode=0 Jan 29 12:18:02 crc kubenswrapper[4852]: I0129 12:18:02.132408 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bce8da7e-64df-4d22-a514-4a3fd7527813","Type":"ContainerDied","Data":"fab847c52218d5cbd59c21f12ec41422b8e5377ace09fcee3e01fd4b034446f8"} Jan 29 12:18:02 crc kubenswrapper[4852]: I0129 12:18:02.867295 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Jan 29 12:18:03 crc 
kubenswrapper[4852]: I0129 12:18:03.148209 4852 generic.go:334] "Generic (PLEG): container finished" podID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerID="00df46d30f2dec062835699d53703fce86a077ba5310636fe053a7af565d563f" exitCode=0 Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.148254 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bce8da7e-64df-4d22-a514-4a3fd7527813","Type":"ContainerDied","Data":"00df46d30f2dec062835699d53703fce86a077ba5310636fe053a7af565d563f"} Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.468426 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.572824 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce8da7e-64df-4d22-a514-4a3fd7527813-etc-machine-id\") pod \"bce8da7e-64df-4d22-a514-4a3fd7527813\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.572906 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bce8da7e-64df-4d22-a514-4a3fd7527813-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bce8da7e-64df-4d22-a514-4a3fd7527813" (UID: "bce8da7e-64df-4d22-a514-4a3fd7527813"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.573105 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data-custom\") pod \"bce8da7e-64df-4d22-a514-4a3fd7527813\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.573993 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-combined-ca-bundle\") pod \"bce8da7e-64df-4d22-a514-4a3fd7527813\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.574029 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-scripts\") pod \"bce8da7e-64df-4d22-a514-4a3fd7527813\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.574059 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lvgd\" (UniqueName: \"kubernetes.io/projected/bce8da7e-64df-4d22-a514-4a3fd7527813-kube-api-access-8lvgd\") pod \"bce8da7e-64df-4d22-a514-4a3fd7527813\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.574104 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data\") pod \"bce8da7e-64df-4d22-a514-4a3fd7527813\" (UID: \"bce8da7e-64df-4d22-a514-4a3fd7527813\") " Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.575004 4852 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bce8da7e-64df-4d22-a514-4a3fd7527813-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 
29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.578391 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-scripts" (OuterVolumeSpecName: "scripts") pod "bce8da7e-64df-4d22-a514-4a3fd7527813" (UID: "bce8da7e-64df-4d22-a514-4a3fd7527813"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.580389 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.580897 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bce8da7e-64df-4d22-a514-4a3fd7527813-kube-api-access-8lvgd" (OuterVolumeSpecName: "kube-api-access-8lvgd") pod "bce8da7e-64df-4d22-a514-4a3fd7527813" (UID: "bce8da7e-64df-4d22-a514-4a3fd7527813"). InnerVolumeSpecName "kube-api-access-8lvgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.595737 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bce8da7e-64df-4d22-a514-4a3fd7527813" (UID: "bce8da7e-64df-4d22-a514-4a3fd7527813"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.626467 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bce8da7e-64df-4d22-a514-4a3fd7527813" (UID: "bce8da7e-64df-4d22-a514-4a3fd7527813"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.677019 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.677061 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.677079 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.677095 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lvgd\" (UniqueName: \"kubernetes.io/projected/bce8da7e-64df-4d22-a514-4a3fd7527813-kube-api-access-8lvgd\") on node \"crc\" DevicePath \"\"" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.680698 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data" (OuterVolumeSpecName: "config-data") pod "bce8da7e-64df-4d22-a514-4a3fd7527813" (UID: "bce8da7e-64df-4d22-a514-4a3fd7527813"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:18:03 crc kubenswrapper[4852]: I0129 12:18:03.778766 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8da7e-64df-4d22-a514-4a3fd7527813-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.159142 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bce8da7e-64df-4d22-a514-4a3fd7527813","Type":"ContainerDied","Data":"421516ba54f2c24d559b55dc17bd4b5241e3a121a184f4b7841d07ca93ff9922"} Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.159200 4852 scope.go:117] "RemoveContainer" containerID="fab847c52218d5cbd59c21f12ec41422b8e5377ace09fcee3e01fd4b034446f8" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.159204 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.197646 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.201179 4852 scope.go:117] "RemoveContainer" containerID="00df46d30f2dec062835699d53703fce86a077ba5310636fe053a7af565d563f" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.208433 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.225097 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:18:04 crc kubenswrapper[4852]: E0129 12:18:04.225597 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="cinder-scheduler" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.225616 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="cinder-scheduler" Jan 29 12:18:04 crc kubenswrapper[4852]: E0129 12:18:04.225645 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="probe" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.225653 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="probe" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.225856 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="probe" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.225894 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" containerName="cinder-scheduler" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.227134 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.229626 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.242719 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.389091 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-config-data\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.389353 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-scripts\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.389523 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84f2b\" (UniqueName: \"kubernetes.io/projected/f67f9eec-ad75-496b-ba55-f7773c45bff3-kube-api-access-84f2b\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.389679 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.389785 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f67f9eec-ad75-496b-ba55-f7773c45bff3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.389853 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.491209 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.491515 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f67f9eec-ad75-496b-ba55-f7773c45bff3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.491556 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.491647 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-config-data\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.491706 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f67f9eec-ad75-496b-ba55-f7773c45bff3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.492042 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-scripts\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.492141 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84f2b\" (UniqueName: \"kubernetes.io/projected/f67f9eec-ad75-496b-ba55-f7773c45bff3-kube-api-access-84f2b\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.497478 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-scripts\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.498373 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.501116 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.505516 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f67f9eec-ad75-496b-ba55-f7773c45bff3-config-data\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 crc kubenswrapper[4852]: I0129 12:18:04.514275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84f2b\" (UniqueName: \"kubernetes.io/projected/f67f9eec-ad75-496b-ba55-f7773c45bff3-kube-api-access-84f2b\") pod \"cinder-scheduler-0\" (UID: \"f67f9eec-ad75-496b-ba55-f7773c45bff3\") " pod="openstack/cinder-scheduler-0" Jan 29 12:18:04 
crc kubenswrapper[4852]: I0129 12:18:04.543824 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 12:18:05 crc kubenswrapper[4852]: I0129 12:18:05.013325 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 12:18:05 crc kubenswrapper[4852]: I0129 12:18:05.483760 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bce8da7e-64df-4d22-a514-4a3fd7527813" path="/var/lib/kubelet/pods/bce8da7e-64df-4d22-a514-4a3fd7527813/volumes" Jan 29 12:18:05 crc kubenswrapper[4852]: W0129 12:18:05.491849 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf67f9eec_ad75_496b_ba55_f7773c45bff3.slice/crio-fea745199d2b7ec8e90609cbcfc011170b933e060433cd7691cfc54fa61af3fa WatchSource:0}: Error finding container fea745199d2b7ec8e90609cbcfc011170b933e060433cd7691cfc54fa61af3fa: Status 404 returned error can't find the container with id fea745199d2b7ec8e90609cbcfc011170b933e060433cd7691cfc54fa61af3fa Jan 29 12:18:06 crc kubenswrapper[4852]: I0129 12:18:06.191608 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f67f9eec-ad75-496b-ba55-f7773c45bff3","Type":"ContainerStarted","Data":"fea745199d2b7ec8e90609cbcfc011170b933e060433cd7691cfc54fa61af3fa"} Jan 29 12:18:07 crc kubenswrapper[4852]: I0129 12:18:07.205278 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f67f9eec-ad75-496b-ba55-f7773c45bff3","Type":"ContainerStarted","Data":"0fcb0fd369db23431749ba5a940f12cb9d31892b10941ee0f1c55136bd4d84c4"} Jan 29 12:18:07 crc kubenswrapper[4852]: I0129 12:18:07.463240 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:18:07 crc kubenswrapper[4852]: E0129 12:18:07.463680 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:18:08 crc kubenswrapper[4852]: I0129 12:18:08.214700 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f67f9eec-ad75-496b-ba55-f7773c45bff3","Type":"ContainerStarted","Data":"21652bddd052a77a8299c7d00d9fd372efd19e2e72ad900301ef21580111fd8a"} Jan 29 12:18:08 crc kubenswrapper[4852]: I0129 12:18:08.254930 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.254911392 podStartE2EDuration="4.254911392s" podCreationTimestamp="2026-01-29 12:18:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:18:08.242597391 +0000 UTC m=+5785.459928525" watchObservedRunningTime="2026-01-29 12:18:08.254911392 +0000 UTC m=+5785.472242526" Jan 29 12:18:08 crc kubenswrapper[4852]: I0129 12:18:08.455264 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 12:18:09 crc kubenswrapper[4852]: I0129 12:18:09.544782 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/cinder-scheduler-0" Jan 29 12:18:14 crc kubenswrapper[4852]: I0129 12:18:14.791700 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 12:18:22 crc kubenswrapper[4852]: I0129 12:18:22.463646 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:18:22 crc kubenswrapper[4852]: E0129 12:18:22.464345 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:18:35 crc kubenswrapper[4852]: I0129 12:18:35.463891 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:18:35 crc kubenswrapper[4852]: E0129 12:18:35.465154 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:18:50 crc kubenswrapper[4852]: I0129 12:18:50.464333 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:18:50 crc kubenswrapper[4852]: E0129 12:18:50.465280 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:19:02 crc kubenswrapper[4852]: I0129 12:19:02.463431 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:19:02 crc kubenswrapper[4852]: E0129 12:19:02.464834 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:19:14 crc kubenswrapper[4852]: I0129 12:19:14.463460 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:19:14 crc kubenswrapper[4852]: E0129 12:19:14.464340 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:19:25 crc kubenswrapper[4852]: I0129 12:19:25.463350 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:19:25 crc kubenswrapper[4852]: E0129 12:19:25.464326 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:19:29 crc kubenswrapper[4852]: I0129 12:19:29.050266 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-79b0-account-create-update-tphb5"] Jan 29 12:19:29 crc kubenswrapper[4852]: I0129 12:19:29.060791 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-blmr9"] Jan 29 12:19:29 crc kubenswrapper[4852]: I0129 12:19:29.068825 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-79b0-account-create-update-tphb5"] Jan 29 12:19:29 crc kubenswrapper[4852]: I0129 12:19:29.078146 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-blmr9"] Jan 29 12:19:29 crc kubenswrapper[4852]: I0129 12:19:29.476243 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b7eb19d-cf79-43a6-8c6f-f86b6911b36f" path="/var/lib/kubelet/pods/4b7eb19d-cf79-43a6-8c6f-f86b6911b36f/volumes" Jan 29 12:19:29 crc kubenswrapper[4852]: I0129 12:19:29.477301 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="829718a4-2107-42a2-b18a-41e1fbf9df79" path="/var/lib/kubelet/pods/829718a4-2107-42a2-b18a-41e1fbf9df79/volumes" Jan 29 12:19:36 crc kubenswrapper[4852]: I0129 12:19:36.035153 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-8mbc9"] Jan 29 12:19:36 crc kubenswrapper[4852]: I0129 12:19:36.044866 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-8mbc9"] Jan 29 12:19:36 crc kubenswrapper[4852]: I0129 12:19:36.464298 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:19:36 crc kubenswrapper[4852]: E0129 12:19:36.464827 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:19:37 crc kubenswrapper[4852]: I0129 12:19:37.475868 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c48a9f07-3553-4f8f-9b9d-fc9edfda284e" path="/var/lib/kubelet/pods/c48a9f07-3553-4f8f-9b9d-fc9edfda284e/volumes" Jan 29 12:19:43 crc kubenswrapper[4852]: E0129 12:19:43.118688 4852 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.23:47440->38.102.83.23:36165: write tcp 38.102.83.23:47440->38.102.83.23:36165: write: broken pipe Jan 29 12:19:50 crc kubenswrapper[4852]: I0129 12:19:50.525977 4852 scope.go:117] "RemoveContainer" 
containerID="d37997caba7afa88605b4003687b01569d4a04315e8d1dcafd6d2f75549d2660" Jan 29 12:19:50 crc kubenswrapper[4852]: I0129 12:19:50.556393 4852 scope.go:117] "RemoveContainer" containerID="08e496c5fedb80181c2d710ee121097d863ed8a258d51dacc0166e85143f5041" Jan 29 12:19:50 crc kubenswrapper[4852]: I0129 12:19:50.598496 4852 scope.go:117] "RemoveContainer" containerID="107757abec3842f4412c144564863962f627ba2e36569bf21ac5bb17a88a4390" Jan 29 12:19:50 crc kubenswrapper[4852]: I0129 12:19:50.634152 4852 scope.go:117] "RemoveContainer" containerID="2849f04a8e34508e1df10a2a6ca28cfd4152242f9b10d8b90881fe966fba6806" Jan 29 12:19:50 crc kubenswrapper[4852]: I0129 12:19:50.673681 4852 scope.go:117] "RemoveContainer" containerID="1a69e60a5621c2b326393b5e4a4496d2cc5c898287082dc411e371addf37438e" Jan 29 12:19:51 crc kubenswrapper[4852]: I0129 12:19:51.463125 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:19:51 crc kubenswrapper[4852]: E0129 12:19:51.463531 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:19:52 crc kubenswrapper[4852]: I0129 12:19:52.029130 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-xspwv"] Jan 29 12:19:52 crc kubenswrapper[4852]: I0129 12:19:52.038705 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-xspwv"] Jan 29 12:19:53 crc kubenswrapper[4852]: I0129 12:19:53.474871 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed338fa4-983d-416b-95b7-0c6b388b9025" path="/var/lib/kubelet/pods/ed338fa4-983d-416b-95b7-0c6b388b9025/volumes" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.705406 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6njt6"] Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.707180 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.710451 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.718577 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-29mj5"] Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.720698 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.729077 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-29mj5"] Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.738047 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6njt6"] Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.769158 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-qs8nf" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.839920 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-lib\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.839993 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjfbd\" (UniqueName: \"kubernetes.io/projected/e97b86c6-6aa7-48e5-a225-a7c5abed3782-kube-api-access-hjfbd\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840023 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e97b86c6-6aa7-48e5-a225-a7c5abed3782-scripts\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840040 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-log\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840088 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-run\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840162 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-etc-ovs\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840192 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-run\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840210 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-run-ovn\") pod 
\"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840245 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnx92\" (UniqueName: \"kubernetes.io/projected/7ff009d1-69d9-49a8-8fd2-caafaab09f52-kube-api-access-vnx92\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840272 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ff009d1-69d9-49a8-8fd2-caafaab09f52-scripts\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.840297 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-log-ovn\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941531 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ff009d1-69d9-49a8-8fd2-caafaab09f52-scripts\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941619 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-log-ovn\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941682 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-lib\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941743 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjfbd\" (UniqueName: \"kubernetes.io/projected/e97b86c6-6aa7-48e5-a225-a7c5abed3782-kube-api-access-hjfbd\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941771 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e97b86c6-6aa7-48e5-a225-a7c5abed3782-scripts\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941823 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-log\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: 
I0129 12:19:55.941866 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-run\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.941970 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-etc-ovs\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.942008 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-run\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.942030 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-run-ovn\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.942068 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnx92\" (UniqueName: \"kubernetes.io/projected/7ff009d1-69d9-49a8-8fd2-caafaab09f52-kube-api-access-vnx92\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943019 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-log\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943074 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-etc-ovs\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943189 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-lib\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-run\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943302 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-log-ovn\") pod \"ovn-controller-6njt6\" (UID: 
\"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943646 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ff009d1-69d9-49a8-8fd2-caafaab09f52-var-run\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.943773 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e97b86c6-6aa7-48e5-a225-a7c5abed3782-var-run-ovn\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.945905 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e97b86c6-6aa7-48e5-a225-a7c5abed3782-scripts\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.945963 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ff009d1-69d9-49a8-8fd2-caafaab09f52-scripts\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.962667 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnx92\" (UniqueName: \"kubernetes.io/projected/7ff009d1-69d9-49a8-8fd2-caafaab09f52-kube-api-access-vnx92\") pod \"ovn-controller-ovs-29mj5\" (UID: \"7ff009d1-69d9-49a8-8fd2-caafaab09f52\") " pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:55 crc kubenswrapper[4852]: I0129 12:19:55.962716 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjfbd\" (UniqueName: \"kubernetes.io/projected/e97b86c6-6aa7-48e5-a225-a7c5abed3782-kube-api-access-hjfbd\") pod \"ovn-controller-6njt6\" (UID: \"e97b86c6-6aa7-48e5-a225-a7c5abed3782\") " pod="openstack/ovn-controller-6njt6" Jan 29 12:19:56 crc kubenswrapper[4852]: I0129 12:19:56.072191 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6njt6" Jan 29 12:19:56 crc kubenswrapper[4852]: I0129 12:19:56.082539 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:19:56 crc kubenswrapper[4852]: I0129 12:19:56.654296 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6njt6"] Jan 29 12:19:56 crc kubenswrapper[4852]: I0129 12:19:56.989045 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-29mj5"] Jan 29 12:19:56 crc kubenswrapper[4852]: W0129 12:19:56.993243 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ff009d1_69d9_49a8_8fd2_caafaab09f52.slice/crio-08e01042b537ad4e106159a3bd53a63f2ab2086f536b982b8d4eaa8bf7b692ec WatchSource:0}: Error finding container 08e01042b537ad4e106159a3bd53a63f2ab2086f536b982b8d4eaa8bf7b692ec: Status 404 returned error can't find the container with id 08e01042b537ad4e106159a3bd53a63f2ab2086f536b982b8d4eaa8bf7b692ec Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.052542 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-v55s7"] Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.053851 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.058869 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.063600 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9l2q\" (UniqueName: \"kubernetes.io/projected/c378ab9d-4549-4f89-84a2-9aad079bc575-kube-api-access-m9l2q\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.063683 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c378ab9d-4549-4f89-84a2-9aad079bc575-ovs-rundir\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.063830 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c378ab9d-4549-4f89-84a2-9aad079bc575-config\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.063884 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c378ab9d-4549-4f89-84a2-9aad079bc575-ovn-rundir\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.075159 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-v55s7"] Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.166724 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c378ab9d-4549-4f89-84a2-9aad079bc575-config\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " 
pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.167082 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c378ab9d-4549-4f89-84a2-9aad079bc575-ovn-rundir\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.167162 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9l2q\" (UniqueName: \"kubernetes.io/projected/c378ab9d-4549-4f89-84a2-9aad079bc575-kube-api-access-m9l2q\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.167221 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c378ab9d-4549-4f89-84a2-9aad079bc575-ovs-rundir\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.167489 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c378ab9d-4549-4f89-84a2-9aad079bc575-ovs-rundir\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.167663 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c378ab9d-4549-4f89-84a2-9aad079bc575-ovn-rundir\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.169104 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c378ab9d-4549-4f89-84a2-9aad079bc575-config\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.199907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9l2q\" (UniqueName: \"kubernetes.io/projected/c378ab9d-4549-4f89-84a2-9aad079bc575-kube-api-access-m9l2q\") pod \"ovn-controller-metrics-v55s7\" (UID: \"c378ab9d-4549-4f89-84a2-9aad079bc575\") " pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.320749 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6njt6" event={"ID":"e97b86c6-6aa7-48e5-a225-a7c5abed3782","Type":"ContainerStarted","Data":"6f3f15dfb368d827c4e7ac008dac372c69c57ef81b2eba2b3428cb76e60d8fcc"} Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.320806 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6njt6" event={"ID":"e97b86c6-6aa7-48e5-a225-a7c5abed3782","Type":"ContainerStarted","Data":"2b3296e1f4caa01c2b117018a0c66232fe0f2c6df1bb25d88e03b64a402fa0e5"} Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.320840 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-6njt6" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 
12:19:57.328484 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-29mj5" event={"ID":"7ff009d1-69d9-49a8-8fd2-caafaab09f52","Type":"ContainerStarted","Data":"08e01042b537ad4e106159a3bd53a63f2ab2086f536b982b8d4eaa8bf7b692ec"} Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.348875 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-6njt6" podStartSLOduration=2.34885274 podStartE2EDuration="2.34885274s" podCreationTimestamp="2026-01-29 12:19:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:19:57.334884479 +0000 UTC m=+5894.552215623" watchObservedRunningTime="2026-01-29 12:19:57.34885274 +0000 UTC m=+5894.566183874" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.390219 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-v55s7" Jan 29 12:19:57 crc kubenswrapper[4852]: I0129 12:19:57.840507 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-v55s7"] Jan 29 12:19:57 crc kubenswrapper[4852]: W0129 12:19:57.854109 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc378ab9d_4549_4f89_84a2_9aad079bc575.slice/crio-b0730d3ea5111f87ccf2195a5ceb7344efa9759d367f0d07f6a1f4cf8d20b4e9 WatchSource:0}: Error finding container b0730d3ea5111f87ccf2195a5ceb7344efa9759d367f0d07f6a1f4cf8d20b4e9: Status 404 returned error can't find the container with id b0730d3ea5111f87ccf2195a5ceb7344efa9759d367f0d07f6a1f4cf8d20b4e9 Jan 29 12:19:58 crc kubenswrapper[4852]: I0129 12:19:58.340982 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-29mj5" event={"ID":"7ff009d1-69d9-49a8-8fd2-caafaab09f52","Type":"ContainerStarted","Data":"fb034d2888243b164d3537d945bb34263bf60834a5be5e232624bbc0a006c3fe"} Jan 29 12:19:58 crc kubenswrapper[4852]: I0129 12:19:58.343471 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-v55s7" event={"ID":"c378ab9d-4549-4f89-84a2-9aad079bc575","Type":"ContainerStarted","Data":"b0730d3ea5111f87ccf2195a5ceb7344efa9759d367f0d07f6a1f4cf8d20b4e9"} Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.336047 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-skb4w"] Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.337691 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.347588 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-skb4w"] Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.378188 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-v55s7" event={"ID":"c378ab9d-4549-4f89-84a2-9aad079bc575","Type":"ContainerStarted","Data":"bf1942a171c71d24c8bbe0ab9f2551df6bf891c873353c6049f7d02a4c591125"} Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.423224 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlcgv\" (UniqueName: \"kubernetes.io/projected/53624bd6-a8fc-485a-a225-4ed4a82cdb06-kube-api-access-nlcgv\") pod \"octavia-db-create-skb4w\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.423309 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53624bd6-a8fc-485a-a225-4ed4a82cdb06-operator-scripts\") pod \"octavia-db-create-skb4w\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.524944 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlcgv\" (UniqueName: \"kubernetes.io/projected/53624bd6-a8fc-485a-a225-4ed4a82cdb06-kube-api-access-nlcgv\") pod \"octavia-db-create-skb4w\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.525326 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53624bd6-a8fc-485a-a225-4ed4a82cdb06-operator-scripts\") pod \"octavia-db-create-skb4w\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.526566 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53624bd6-a8fc-485a-a225-4ed4a82cdb06-operator-scripts\") pod \"octavia-db-create-skb4w\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.548962 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlcgv\" (UniqueName: \"kubernetes.io/projected/53624bd6-a8fc-485a-a225-4ed4a82cdb06-kube-api-access-nlcgv\") pod \"octavia-db-create-skb4w\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " pod="openstack/octavia-db-create-skb4w" Jan 29 12:19:59 crc kubenswrapper[4852]: I0129 12:19:59.691865 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-skb4w" Jan 29 12:20:00 crc kubenswrapper[4852]: I0129 12:20:00.187697 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-skb4w"] Jan 29 12:20:00 crc kubenswrapper[4852]: I0129 12:20:00.390294 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-skb4w" event={"ID":"53624bd6-a8fc-485a-a225-4ed4a82cdb06","Type":"ContainerStarted","Data":"fab3e56be9abff4e0335ce21f9c7bf4de127de7c7a7bf35836d0b7c93b446f93"} Jan 29 12:20:00 crc kubenswrapper[4852]: I0129 12:20:00.393938 4852 generic.go:334] "Generic (PLEG): container finished" podID="7ff009d1-69d9-49a8-8fd2-caafaab09f52" containerID="fb034d2888243b164d3537d945bb34263bf60834a5be5e232624bbc0a006c3fe" exitCode=0 Jan 29 12:20:00 crc kubenswrapper[4852]: I0129 12:20:00.394098 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-29mj5" event={"ID":"7ff009d1-69d9-49a8-8fd2-caafaab09f52","Type":"ContainerDied","Data":"fb034d2888243b164d3537d945bb34263bf60834a5be5e232624bbc0a006c3fe"} Jan 29 12:20:00 crc kubenswrapper[4852]: I0129 12:20:00.412313 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-v55s7" podStartSLOduration=3.412294114 podStartE2EDuration="3.412294114s" podCreationTimestamp="2026-01-29 12:19:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:20:00.406442012 +0000 UTC m=+5897.623773166" watchObservedRunningTime="2026-01-29 12:20:00.412294114 +0000 UTC m=+5897.629625248" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.025851 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-9d89-account-create-update-n8cxx"] Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.027249 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.029730 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.088648 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-9d89-account-create-update-n8cxx"] Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.172480 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c41c0a46-8cd3-44af-8351-8a4008c6622f-operator-scripts\") pod \"octavia-9d89-account-create-update-n8cxx\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.172571 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkstm\" (UniqueName: \"kubernetes.io/projected/c41c0a46-8cd3-44af-8351-8a4008c6622f-kube-api-access-wkstm\") pod \"octavia-9d89-account-create-update-n8cxx\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.275163 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c41c0a46-8cd3-44af-8351-8a4008c6622f-operator-scripts\") pod \"octavia-9d89-account-create-update-n8cxx\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.275345 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkstm\" (UniqueName: \"kubernetes.io/projected/c41c0a46-8cd3-44af-8351-8a4008c6622f-kube-api-access-wkstm\") pod \"octavia-9d89-account-create-update-n8cxx\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.276152 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c41c0a46-8cd3-44af-8351-8a4008c6622f-operator-scripts\") pod \"octavia-9d89-account-create-update-n8cxx\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.302429 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkstm\" (UniqueName: \"kubernetes.io/projected/c41c0a46-8cd3-44af-8351-8a4008c6622f-kube-api-access-wkstm\") pod \"octavia-9d89-account-create-update-n8cxx\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.356056 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.406135 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-29mj5" event={"ID":"7ff009d1-69d9-49a8-8fd2-caafaab09f52","Type":"ContainerStarted","Data":"67bd2ed1c9545c3bf667ba4a3e1e879cfa873c457fa50a508b47fe44754fdec1"} Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.406178 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-29mj5" event={"ID":"7ff009d1-69d9-49a8-8fd2-caafaab09f52","Type":"ContainerStarted","Data":"a360112981b9a9cf9ba2536a3ebdf6302a85c3a70cb091e735a933fb568e08ad"} Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.406468 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.406535 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.412350 4852 generic.go:334] "Generic (PLEG): container finished" podID="53624bd6-a8fc-485a-a225-4ed4a82cdb06" containerID="8dd3651d5d2a4606926e97715ab6ebd9494d8b8a21bd5c61fb93d4e6ce0863c9" exitCode=0 Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.412456 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-skb4w" event={"ID":"53624bd6-a8fc-485a-a225-4ed4a82cdb06","Type":"ContainerDied","Data":"8dd3651d5d2a4606926e97715ab6ebd9494d8b8a21bd5c61fb93d4e6ce0863c9"} Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.436023 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-29mj5" podStartSLOduration=6.436001698 podStartE2EDuration="6.436001698s" podCreationTimestamp="2026-01-29 12:19:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:20:01.429354217 +0000 UTC m=+5898.646685351" watchObservedRunningTime="2026-01-29 12:20:01.436001698 +0000 UTC m=+5898.653332832" Jan 29 12:20:01 crc kubenswrapper[4852]: I0129 12:20:01.864462 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-9d89-account-create-update-n8cxx"] Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.430330 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-9d89-account-create-update-n8cxx" event={"ID":"c41c0a46-8cd3-44af-8351-8a4008c6622f","Type":"ContainerStarted","Data":"a96dca572f197dbbc5f8065c85bc43ea15de1f5c1f4ac7de4881d3e089da7468"} Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.430737 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-9d89-account-create-update-n8cxx" event={"ID":"c41c0a46-8cd3-44af-8351-8a4008c6622f","Type":"ContainerStarted","Data":"96558c4599ac4987e1ce4de8b88aafe49c6f753742bde2b6b10173ab26488c58"} Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.463538 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.463658 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-9d89-account-create-update-n8cxx" podStartSLOduration=2.463639898 podStartE2EDuration="2.463639898s" podCreationTimestamp="2026-01-29 12:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:20:02.453531491 +0000 UTC m=+5899.670862635" watchObservedRunningTime="2026-01-29 12:20:02.463639898 +0000 UTC m=+5899.680971032" Jan 29 12:20:02 crc kubenswrapper[4852]: E0129 12:20:02.463896 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.812220 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-skb4w" Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.906314 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53624bd6-a8fc-485a-a225-4ed4a82cdb06-operator-scripts\") pod \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.906431 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlcgv\" (UniqueName: \"kubernetes.io/projected/53624bd6-a8fc-485a-a225-4ed4a82cdb06-kube-api-access-nlcgv\") pod \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\" (UID: \"53624bd6-a8fc-485a-a225-4ed4a82cdb06\") " Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.908072 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53624bd6-a8fc-485a-a225-4ed4a82cdb06-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "53624bd6-a8fc-485a-a225-4ed4a82cdb06" (UID: "53624bd6-a8fc-485a-a225-4ed4a82cdb06"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:20:02 crc kubenswrapper[4852]: I0129 12:20:02.919554 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53624bd6-a8fc-485a-a225-4ed4a82cdb06-kube-api-access-nlcgv" (OuterVolumeSpecName: "kube-api-access-nlcgv") pod "53624bd6-a8fc-485a-a225-4ed4a82cdb06" (UID: "53624bd6-a8fc-485a-a225-4ed4a82cdb06"). InnerVolumeSpecName "kube-api-access-nlcgv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.008258 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlcgv\" (UniqueName: \"kubernetes.io/projected/53624bd6-a8fc-485a-a225-4ed4a82cdb06-kube-api-access-nlcgv\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.008303 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53624bd6-a8fc-485a-a225-4ed4a82cdb06-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.449290 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-skb4w" event={"ID":"53624bd6-a8fc-485a-a225-4ed4a82cdb06","Type":"ContainerDied","Data":"fab3e56be9abff4e0335ce21f9c7bf4de127de7c7a7bf35836d0b7c93b446f93"} Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.449345 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fab3e56be9abff4e0335ce21f9c7bf4de127de7c7a7bf35836d0b7c93b446f93" Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.449426 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-skb4w" Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.456394 4852 generic.go:334] "Generic (PLEG): container finished" podID="c41c0a46-8cd3-44af-8351-8a4008c6622f" containerID="a96dca572f197dbbc5f8065c85bc43ea15de1f5c1f4ac7de4881d3e089da7468" exitCode=0 Jan 29 12:20:03 crc kubenswrapper[4852]: I0129 12:20:03.456464 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-9d89-account-create-update-n8cxx" event={"ID":"c41c0a46-8cd3-44af-8351-8a4008c6622f","Type":"ContainerDied","Data":"a96dca572f197dbbc5f8065c85bc43ea15de1f5c1f4ac7de4881d3e089da7468"} Jan 29 12:20:04 crc kubenswrapper[4852]: I0129 12:20:04.835486 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:04 crc kubenswrapper[4852]: I0129 12:20:04.947777 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c41c0a46-8cd3-44af-8351-8a4008c6622f-operator-scripts\") pod \"c41c0a46-8cd3-44af-8351-8a4008c6622f\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " Jan 29 12:20:04 crc kubenswrapper[4852]: I0129 12:20:04.947832 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkstm\" (UniqueName: \"kubernetes.io/projected/c41c0a46-8cd3-44af-8351-8a4008c6622f-kube-api-access-wkstm\") pod \"c41c0a46-8cd3-44af-8351-8a4008c6622f\" (UID: \"c41c0a46-8cd3-44af-8351-8a4008c6622f\") " Jan 29 12:20:04 crc kubenswrapper[4852]: I0129 12:20:04.948423 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c41c0a46-8cd3-44af-8351-8a4008c6622f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c41c0a46-8cd3-44af-8351-8a4008c6622f" (UID: "c41c0a46-8cd3-44af-8351-8a4008c6622f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:20:04 crc kubenswrapper[4852]: I0129 12:20:04.948545 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c41c0a46-8cd3-44af-8351-8a4008c6622f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:04 crc kubenswrapper[4852]: I0129 12:20:04.960825 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c41c0a46-8cd3-44af-8351-8a4008c6622f-kube-api-access-wkstm" (OuterVolumeSpecName: "kube-api-access-wkstm") pod "c41c0a46-8cd3-44af-8351-8a4008c6622f" (UID: "c41c0a46-8cd3-44af-8351-8a4008c6622f"). InnerVolumeSpecName "kube-api-access-wkstm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:05 crc kubenswrapper[4852]: I0129 12:20:05.050395 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkstm\" (UniqueName: \"kubernetes.io/projected/c41c0a46-8cd3-44af-8351-8a4008c6622f-kube-api-access-wkstm\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:05 crc kubenswrapper[4852]: I0129 12:20:05.479497 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-9d89-account-create-update-n8cxx" event={"ID":"c41c0a46-8cd3-44af-8351-8a4008c6622f","Type":"ContainerDied","Data":"96558c4599ac4987e1ce4de8b88aafe49c6f753742bde2b6b10173ab26488c58"} Jan 29 12:20:05 crc kubenswrapper[4852]: I0129 12:20:05.479533 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96558c4599ac4987e1ce4de8b88aafe49c6f753742bde2b6b10173ab26488c58" Jan 29 12:20:05 crc kubenswrapper[4852]: I0129 12:20:05.479572 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-9d89-account-create-update-n8cxx" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.383354 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-sm2q7"] Jan 29 12:20:07 crc kubenswrapper[4852]: E0129 12:20:07.384023 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53624bd6-a8fc-485a-a225-4ed4a82cdb06" containerName="mariadb-database-create" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.384035 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="53624bd6-a8fc-485a-a225-4ed4a82cdb06" containerName="mariadb-database-create" Jan 29 12:20:07 crc kubenswrapper[4852]: E0129 12:20:07.384062 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c41c0a46-8cd3-44af-8351-8a4008c6622f" containerName="mariadb-account-create-update" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.384069 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c41c0a46-8cd3-44af-8351-8a4008c6622f" containerName="mariadb-account-create-update" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.384267 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c41c0a46-8cd3-44af-8351-8a4008c6622f" containerName="mariadb-account-create-update" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.384287 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="53624bd6-a8fc-485a-a225-4ed4a82cdb06" containerName="mariadb-database-create" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.384916 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.391463 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-sm2q7"] Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.437214 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnbnn\" (UniqueName: \"kubernetes.io/projected/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-kube-api-access-rnbnn\") pod \"octavia-persistence-db-create-sm2q7\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.437470 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-operator-scripts\") pod \"octavia-persistence-db-create-sm2q7\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.539824 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnbnn\" (UniqueName: \"kubernetes.io/projected/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-kube-api-access-rnbnn\") pod \"octavia-persistence-db-create-sm2q7\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.539893 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-operator-scripts\") pod \"octavia-persistence-db-create-sm2q7\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.540806 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-operator-scripts\") pod \"octavia-persistence-db-create-sm2q7\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.567678 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnbnn\" (UniqueName: \"kubernetes.io/projected/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-kube-api-access-rnbnn\") pod \"octavia-persistence-db-create-sm2q7\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:07 crc kubenswrapper[4852]: I0129 12:20:07.748341 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.195518 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-6377-account-create-update-jp4k2"] Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.197356 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.201172 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.211857 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-6377-account-create-update-jp4k2"] Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.239453 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-sm2q7"] Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.355896 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f946532-86bc-4386-ac2e-188452c413d5-operator-scripts\") pod \"octavia-6377-account-create-update-jp4k2\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.356156 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bdrc\" (UniqueName: \"kubernetes.io/projected/8f946532-86bc-4386-ac2e-188452c413d5-kube-api-access-5bdrc\") pod \"octavia-6377-account-create-update-jp4k2\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.458500 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f946532-86bc-4386-ac2e-188452c413d5-operator-scripts\") pod \"octavia-6377-account-create-update-jp4k2\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.458740 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bdrc\" (UniqueName: \"kubernetes.io/projected/8f946532-86bc-4386-ac2e-188452c413d5-kube-api-access-5bdrc\") pod \"octavia-6377-account-create-update-jp4k2\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.459635 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f946532-86bc-4386-ac2e-188452c413d5-operator-scripts\") pod \"octavia-6377-account-create-update-jp4k2\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.492869 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bdrc\" (UniqueName: \"kubernetes.io/projected/8f946532-86bc-4386-ac2e-188452c413d5-kube-api-access-5bdrc\") pod \"octavia-6377-account-create-update-jp4k2\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.510260 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-sm2q7" event={"ID":"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88","Type":"ContainerStarted","Data":"311b71a0a0d2e39f11cc25ea1342b8261b48a14e727564f106459b6a730dc08e"} Jan 29 12:20:08 crc kubenswrapper[4852]: I0129 12:20:08.593989 4852 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:09 crc kubenswrapper[4852]: I0129 12:20:09.056954 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-6377-account-create-update-jp4k2"] Jan 29 12:20:09 crc kubenswrapper[4852]: I0129 12:20:09.520450 4852 generic.go:334] "Generic (PLEG): container finished" podID="3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" containerID="ead816478c4b0e4f70347e6fa086efaf9a54dbb6fa83883a0636d3d7aae043ef" exitCode=0 Jan 29 12:20:09 crc kubenswrapper[4852]: I0129 12:20:09.521789 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-sm2q7" event={"ID":"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88","Type":"ContainerDied","Data":"ead816478c4b0e4f70347e6fa086efaf9a54dbb6fa83883a0636d3d7aae043ef"} Jan 29 12:20:09 crc kubenswrapper[4852]: I0129 12:20:09.524147 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-6377-account-create-update-jp4k2" event={"ID":"8f946532-86bc-4386-ac2e-188452c413d5","Type":"ContainerStarted","Data":"62bac8f7709e4e9cc01f07a112cd4b5dc9cc8ccfa4eeee9a1b5124a631fdafbc"} Jan 29 12:20:09 crc kubenswrapper[4852]: I0129 12:20:09.524428 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-6377-account-create-update-jp4k2" event={"ID":"8f946532-86bc-4386-ac2e-188452c413d5","Type":"ContainerStarted","Data":"94c099f0ce087a3b2b7df31bebd859497a214d6d3466bfa767bb7fcfb37f19ab"} Jan 29 12:20:09 crc kubenswrapper[4852]: I0129 12:20:09.558629 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-6377-account-create-update-jp4k2" podStartSLOduration=1.558612073 podStartE2EDuration="1.558612073s" podCreationTimestamp="2026-01-29 12:20:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:20:09.554708248 +0000 UTC m=+5906.772039402" watchObservedRunningTime="2026-01-29 12:20:09.558612073 +0000 UTC m=+5906.775943207" Jan 29 12:20:10 crc kubenswrapper[4852]: I0129 12:20:10.913079 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.009988 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnbnn\" (UniqueName: \"kubernetes.io/projected/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-kube-api-access-rnbnn\") pod \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.010230 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-operator-scripts\") pod \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\" (UID: \"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88\") " Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.010766 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" (UID: "3ebf09e3-a2fe-42f2-a3cc-6634486a8f88"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.011314 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.015024 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-kube-api-access-rnbnn" (OuterVolumeSpecName: "kube-api-access-rnbnn") pod "3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" (UID: "3ebf09e3-a2fe-42f2-a3cc-6634486a8f88"). InnerVolumeSpecName "kube-api-access-rnbnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.112931 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnbnn\" (UniqueName: \"kubernetes.io/projected/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88-kube-api-access-rnbnn\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.544840 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-sm2q7" event={"ID":"3ebf09e3-a2fe-42f2-a3cc-6634486a8f88","Type":"ContainerDied","Data":"311b71a0a0d2e39f11cc25ea1342b8261b48a14e727564f106459b6a730dc08e"} Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.544908 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="311b71a0a0d2e39f11cc25ea1342b8261b48a14e727564f106459b6a730dc08e" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.544856 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-sm2q7" Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.546554 4852 generic.go:334] "Generic (PLEG): container finished" podID="8f946532-86bc-4386-ac2e-188452c413d5" containerID="62bac8f7709e4e9cc01f07a112cd4b5dc9cc8ccfa4eeee9a1b5124a631fdafbc" exitCode=0 Jan 29 12:20:11 crc kubenswrapper[4852]: I0129 12:20:11.546693 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-6377-account-create-update-jp4k2" event={"ID":"8f946532-86bc-4386-ac2e-188452c413d5","Type":"ContainerDied","Data":"62bac8f7709e4e9cc01f07a112cd4b5dc9cc8ccfa4eeee9a1b5124a631fdafbc"} Jan 29 12:20:12 crc kubenswrapper[4852]: I0129 12:20:12.959193 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.059409 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f946532-86bc-4386-ac2e-188452c413d5-operator-scripts\") pod \"8f946532-86bc-4386-ac2e-188452c413d5\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.059620 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bdrc\" (UniqueName: \"kubernetes.io/projected/8f946532-86bc-4386-ac2e-188452c413d5-kube-api-access-5bdrc\") pod \"8f946532-86bc-4386-ac2e-188452c413d5\" (UID: \"8f946532-86bc-4386-ac2e-188452c413d5\") " Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.060300 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f946532-86bc-4386-ac2e-188452c413d5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8f946532-86bc-4386-ac2e-188452c413d5" (UID: "8f946532-86bc-4386-ac2e-188452c413d5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.066198 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f946532-86bc-4386-ac2e-188452c413d5-kube-api-access-5bdrc" (OuterVolumeSpecName: "kube-api-access-5bdrc") pod "8f946532-86bc-4386-ac2e-188452c413d5" (UID: "8f946532-86bc-4386-ac2e-188452c413d5"). InnerVolumeSpecName "kube-api-access-5bdrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.161968 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f946532-86bc-4386-ac2e-188452c413d5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.162015 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bdrc\" (UniqueName: \"kubernetes.io/projected/8f946532-86bc-4386-ac2e-188452c413d5-kube-api-access-5bdrc\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.841776 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-6377-account-create-update-jp4k2" event={"ID":"8f946532-86bc-4386-ac2e-188452c413d5","Type":"ContainerDied","Data":"94c099f0ce087a3b2b7df31bebd859497a214d6d3466bfa767bb7fcfb37f19ab"} Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.841833 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94c099f0ce087a3b2b7df31bebd859497a214d6d3466bfa767bb7fcfb37f19ab" Jan 29 12:20:13 crc kubenswrapper[4852]: I0129 12:20:13.841849 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-6377-account-create-update-jp4k2" Jan 29 12:20:16 crc kubenswrapper[4852]: I0129 12:20:16.463866 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:20:16 crc kubenswrapper[4852]: E0129 12:20:16.464459 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.510754 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-7679fffcf8-j62tz"] Jan 29 12:20:19 crc kubenswrapper[4852]: E0129 12:20:19.511530 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" containerName="mariadb-database-create" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.511541 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" containerName="mariadb-database-create" Jan 29 12:20:19 crc kubenswrapper[4852]: E0129 12:20:19.511559 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f946532-86bc-4386-ac2e-188452c413d5" containerName="mariadb-account-create-update" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.511565 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f946532-86bc-4386-ac2e-188452c413d5" containerName="mariadb-account-create-update" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.511762 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f946532-86bc-4386-ac2e-188452c413d5" containerName="mariadb-account-create-update" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.511778 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" containerName="mariadb-database-create" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.513445 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.522410 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.522631 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-fcgj6" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.522814 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.534136 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-7679fffcf8-j62tz"] Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.541997 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-combined-ca-bundle\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.542129 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-octavia-run\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.542160 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-config-data\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.542268 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-scripts\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.542301 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-config-data-merged\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.644095 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-combined-ca-bundle\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.644297 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-config-data\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 
crc kubenswrapper[4852]: I0129 12:20:19.644317 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-octavia-run\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.644498 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-scripts\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.644543 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-config-data-merged\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.644979 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-config-data-merged\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.645396 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-octavia-run\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.650619 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-config-data\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.659413 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-combined-ca-bundle\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.661179 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fe31de4-bde3-421f-92d2-c5cb8b3022fd-scripts\") pod \"octavia-api-7679fffcf8-j62tz\" (UID: \"2fe31de4-bde3-421f-92d2-c5cb8b3022fd\") " pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:19 crc kubenswrapper[4852]: I0129 12:20:19.845920 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:20:20 crc kubenswrapper[4852]: I0129 12:20:20.368458 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-7679fffcf8-j62tz"] Jan 29 12:20:20 crc kubenswrapper[4852]: W0129 12:20:20.368534 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fe31de4_bde3_421f_92d2_c5cb8b3022fd.slice/crio-f9b4bb681c93d54fc5bb24aab925a3a39f08f77ec720533e2c42b197355c8b22 WatchSource:0}: Error finding container f9b4bb681c93d54fc5bb24aab925a3a39f08f77ec720533e2c42b197355c8b22: Status 404 returned error can't find the container with id f9b4bb681c93d54fc5bb24aab925a3a39f08f77ec720533e2c42b197355c8b22 Jan 29 12:20:20 crc kubenswrapper[4852]: I0129 12:20:20.938436 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7679fffcf8-j62tz" event={"ID":"2fe31de4-bde3-421f-92d2-c5cb8b3022fd","Type":"ContainerStarted","Data":"f9b4bb681c93d54fc5bb24aab925a3a39f08f77ec720533e2c42b197355c8b22"} Jan 29 12:20:28 crc kubenswrapper[4852]: I0129 12:20:28.464153 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:20:28 crc kubenswrapper[4852]: E0129 12:20:28.465549 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.153434 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.155007 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6njt6" podUID="e97b86c6-6aa7-48e5-a225-a7c5abed3782" containerName="ovn-controller" probeResult="failure" output=< Jan 29 12:20:31 crc kubenswrapper[4852]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 29 12:20:31 crc kubenswrapper[4852]: > Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.158313 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-29mj5" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.302380 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6njt6-config-h2rcv"] Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.305027 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.311751 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.320284 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6njt6-config-h2rcv"] Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.504380 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run-ovn\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.504446 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.504523 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-log-ovn\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.504557 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-scripts\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.504606 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnr2v\" (UniqueName: \"kubernetes.io/projected/31af334c-da7d-4421-9525-51629ef66705-kube-api-access-wnr2v\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.504682 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-additional-scripts\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609064 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run-ovn\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609137 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run\") pod 
\"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609210 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-log-ovn\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609236 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-scripts\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609262 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnr2v\" (UniqueName: \"kubernetes.io/projected/31af334c-da7d-4421-9525-51629ef66705-kube-api-access-wnr2v\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609477 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-additional-scripts\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609609 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run-ovn\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609725 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-log-ovn\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.609784 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.610671 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-additional-scripts\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.612695 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-scripts\") pod 
\"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.640209 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnr2v\" (UniqueName: \"kubernetes.io/projected/31af334c-da7d-4421-9525-51629ef66705-kube-api-access-wnr2v\") pod \"ovn-controller-6njt6-config-h2rcv\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:31 crc kubenswrapper[4852]: I0129 12:20:31.643606 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:36 crc kubenswrapper[4852]: I0129 12:20:36.127671 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6njt6" podUID="e97b86c6-6aa7-48e5-a225-a7c5abed3782" containerName="ovn-controller" probeResult="failure" output=< Jan 29 12:20:36 crc kubenswrapper[4852]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 29 12:20:36 crc kubenswrapper[4852]: > Jan 29 12:20:40 crc kubenswrapper[4852]: I0129 12:20:40.463213 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:20:40 crc kubenswrapper[4852]: E0129 12:20:40.464074 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:20:41 crc kubenswrapper[4852]: I0129 12:20:41.147305 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6njt6" podUID="e97b86c6-6aa7-48e5-a225-a7c5abed3782" containerName="ovn-controller" probeResult="failure" output=< Jan 29 12:20:41 crc kubenswrapper[4852]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 29 12:20:41 crc kubenswrapper[4852]: > Jan 29 12:20:45 crc kubenswrapper[4852]: E0129 12:20:45.274013 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified" Jan 29 12:20:45 crc kubenswrapper[4852]: E0129 12:20:45.274561 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/container-scripts/init.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-merged,ReadOnly:false,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42437,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42437,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-api-7679fffcf8-j62tz_openstack(2fe31de4-bde3-421f-92d2-c5cb8b3022fd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 12:20:45 crc kubenswrapper[4852]: E0129 12:20:45.276402 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/octavia-api-7679fffcf8-j62tz" podUID="2fe31de4-bde3-421f-92d2-c5cb8b3022fd" Jan 29 12:20:45 crc kubenswrapper[4852]: I0129 12:20:45.790902 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6njt6-config-h2rcv"] Jan 29 12:20:46 crc kubenswrapper[4852]: I0129 12:20:46.114143 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6njt6" podUID="e97b86c6-6aa7-48e5-a225-a7c5abed3782" containerName="ovn-controller" probeResult="failure" output=< Jan 29 12:20:46 crc kubenswrapper[4852]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 29 12:20:46 crc kubenswrapper[4852]: > Jan 29 12:20:46 crc kubenswrapper[4852]: I0129 12:20:46.199404 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6njt6-config-h2rcv" event={"ID":"31af334c-da7d-4421-9525-51629ef66705","Type":"ContainerStarted","Data":"bfe0cbafb0eca0cfb38f9b372dc9422ca1ff807ed09a20aa75e188722510d602"} Jan 29 12:20:46 crc kubenswrapper[4852]: I0129 12:20:46.199799 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6njt6-config-h2rcv" event={"ID":"31af334c-da7d-4421-9525-51629ef66705","Type":"ContainerStarted","Data":"71eeea656c1653d93e01feadbebdc3adfc1ffab5d52d6b55d9b4b08e5b21ddac"} Jan 29 12:20:46 crc kubenswrapper[4852]: E0129 12:20:46.201437 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified\\\"\"" pod="openstack/octavia-api-7679fffcf8-j62tz" podUID="2fe31de4-bde3-421f-92d2-c5cb8b3022fd" Jan 29 12:20:46 crc kubenswrapper[4852]: I0129 
12:20:46.244913 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-6njt6-config-h2rcv" podStartSLOduration=15.244894973 podStartE2EDuration="15.244894973s" podCreationTimestamp="2026-01-29 12:20:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:20:46.242020202 +0000 UTC m=+5943.459351336" watchObservedRunningTime="2026-01-29 12:20:46.244894973 +0000 UTC m=+5943.462226107" Jan 29 12:20:47 crc kubenswrapper[4852]: I0129 12:20:47.213540 4852 generic.go:334] "Generic (PLEG): container finished" podID="31af334c-da7d-4421-9525-51629ef66705" containerID="bfe0cbafb0eca0cfb38f9b372dc9422ca1ff807ed09a20aa75e188722510d602" exitCode=0 Jan 29 12:20:47 crc kubenswrapper[4852]: I0129 12:20:47.213638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6njt6-config-h2rcv" event={"ID":"31af334c-da7d-4421-9525-51629ef66705","Type":"ContainerDied","Data":"bfe0cbafb0eca0cfb38f9b372dc9422ca1ff807ed09a20aa75e188722510d602"} Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.619280 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814220 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run-ovn\") pod \"31af334c-da7d-4421-9525-51629ef66705\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814313 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-log-ovn\") pod \"31af334c-da7d-4421-9525-51629ef66705\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814388 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnr2v\" (UniqueName: \"kubernetes.io/projected/31af334c-da7d-4421-9525-51629ef66705-kube-api-access-wnr2v\") pod \"31af334c-da7d-4421-9525-51629ef66705\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814401 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "31af334c-da7d-4421-9525-51629ef66705" (UID: "31af334c-da7d-4421-9525-51629ef66705"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814469 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-additional-scripts\") pod \"31af334c-da7d-4421-9525-51629ef66705\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814522 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "31af334c-da7d-4421-9525-51629ef66705" (UID: "31af334c-da7d-4421-9525-51629ef66705"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814538 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-scripts\") pod \"31af334c-da7d-4421-9525-51629ef66705\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814565 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run\") pod \"31af334c-da7d-4421-9525-51629ef66705\" (UID: \"31af334c-da7d-4421-9525-51629ef66705\") " Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814976 4852 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.814993 4852 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.815011 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run" (OuterVolumeSpecName: "var-run") pod "31af334c-da7d-4421-9525-51629ef66705" (UID: "31af334c-da7d-4421-9525-51629ef66705"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.815748 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "31af334c-da7d-4421-9525-51629ef66705" (UID: "31af334c-da7d-4421-9525-51629ef66705"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.816296 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-scripts" (OuterVolumeSpecName: "scripts") pod "31af334c-da7d-4421-9525-51629ef66705" (UID: "31af334c-da7d-4421-9525-51629ef66705"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.831159 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31af334c-da7d-4421-9525-51629ef66705-kube-api-access-wnr2v" (OuterVolumeSpecName: "kube-api-access-wnr2v") pod "31af334c-da7d-4421-9525-51629ef66705" (UID: "31af334c-da7d-4421-9525-51629ef66705"). InnerVolumeSpecName "kube-api-access-wnr2v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.890110 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-6njt6-config-h2rcv"] Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.900899 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-6njt6-config-h2rcv"] Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.917051 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.917097 4852 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/31af334c-da7d-4421-9525-51629ef66705-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.917111 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnr2v\" (UniqueName: \"kubernetes.io/projected/31af334c-da7d-4421-9525-51629ef66705-kube-api-access-wnr2v\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:48 crc kubenswrapper[4852]: I0129 12:20:48.917127 4852 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/31af334c-da7d-4421-9525-51629ef66705-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:49 crc kubenswrapper[4852]: I0129 12:20:49.237896 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71eeea656c1653d93e01feadbebdc3adfc1ffab5d52d6b55d9b4b08e5b21ddac" Jan 29 12:20:49 crc kubenswrapper[4852]: I0129 12:20:49.237943 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-6njt6-config-h2rcv" Jan 29 12:20:49 crc kubenswrapper[4852]: I0129 12:20:49.475623 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31af334c-da7d-4421-9525-51629ef66705" path="/var/lib/kubelet/pods/31af334c-da7d-4421-9525-51629ef66705/volumes" Jan 29 12:20:50 crc kubenswrapper[4852]: I0129 12:20:50.792784 4852 scope.go:117] "RemoveContainer" containerID="c15b10e98fde22342b6630391d8e74a7f2d1c30a4e47629ec715891a14be63f9" Jan 29 12:20:50 crc kubenswrapper[4852]: I0129 12:20:50.827570 4852 scope.go:117] "RemoveContainer" containerID="12a9892db3ae1e68670100f9f092ac08aaef25aaeb89752a18e351f5a33682c0" Jan 29 12:20:50 crc kubenswrapper[4852]: I0129 12:20:50.882098 4852 scope.go:117] "RemoveContainer" containerID="82ee2f082f25cf881e48c0dc5a07227b1115d3228ec25ed656eb037a912baa5e" Jan 29 12:20:51 crc kubenswrapper[4852]: I0129 12:20:51.155701 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-6njt6" Jan 29 12:20:51 crc kubenswrapper[4852]: I0129 12:20:51.464553 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:20:51 crc kubenswrapper[4852]: E0129 12:20:51.465094 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:21:02 crc kubenswrapper[4852]: I0129 12:21:02.378336 4852 generic.go:334] "Generic (PLEG): container finished" podID="2fe31de4-bde3-421f-92d2-c5cb8b3022fd" containerID="bb04080c416d2635ffd675087cd57e5a46e7218e88c79d8770e56cd446e93f35" exitCode=0 Jan 29 12:21:02 crc kubenswrapper[4852]: I0129 12:21:02.378421 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7679fffcf8-j62tz" event={"ID":"2fe31de4-bde3-421f-92d2-c5cb8b3022fd","Type":"ContainerDied","Data":"bb04080c416d2635ffd675087cd57e5a46e7218e88c79d8770e56cd446e93f35"} Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.005470 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-xw2qz"] Jan 29 12:21:03 crc kubenswrapper[4852]: E0129 12:21:03.008271 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31af334c-da7d-4421-9525-51629ef66705" containerName="ovn-config" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.008302 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="31af334c-da7d-4421-9525-51629ef66705" containerName="ovn-config" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.008531 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="31af334c-da7d-4421-9525-51629ef66705" containerName="ovn-config" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.009892 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.012034 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.014206 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.014637 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.018526 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-xw2qz"] Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.121407 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65be4657-225c-4b92-b227-cabe5bf069e2-scripts\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.121654 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/65be4657-225c-4b92-b227-cabe5bf069e2-config-data-merged\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.121684 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/65be4657-225c-4b92-b227-cabe5bf069e2-hm-ports\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.121742 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65be4657-225c-4b92-b227-cabe5bf069e2-config-data\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.224509 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/65be4657-225c-4b92-b227-cabe5bf069e2-config-data-merged\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.225096 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/65be4657-225c-4b92-b227-cabe5bf069e2-config-data-merged\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.225137 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/65be4657-225c-4b92-b227-cabe5bf069e2-hm-ports\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.225292 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/65be4657-225c-4b92-b227-cabe5bf069e2-config-data\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.225337 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65be4657-225c-4b92-b227-cabe5bf069e2-scripts\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.225840 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/65be4657-225c-4b92-b227-cabe5bf069e2-hm-ports\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.229507 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65be4657-225c-4b92-b227-cabe5bf069e2-config-data\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.231257 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65be4657-225c-4b92-b227-cabe5bf069e2-scripts\") pod \"octavia-rsyslog-xw2qz\" (UID: \"65be4657-225c-4b92-b227-cabe5bf069e2\") " pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.345964 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.400225 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7679fffcf8-j62tz" event={"ID":"2fe31de4-bde3-421f-92d2-c5cb8b3022fd","Type":"ContainerStarted","Data":"07798d78217aa9773013972add14c3bc41353bf0025547645907d9e2ef02b616"} Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.400278 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7679fffcf8-j62tz" event={"ID":"2fe31de4-bde3-421f-92d2-c5cb8b3022fd","Type":"ContainerStarted","Data":"c45113b68ac425a11c59bcd7f700425d641ac6c272230fd9a48d01e342d5fdc6"} Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.401786 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.401838 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.435065 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-7679fffcf8-j62tz" podStartSLOduration=3.493743836 podStartE2EDuration="44.435043195s" podCreationTimestamp="2026-01-29 12:20:19 +0000 UTC" firstStartedPulling="2026-01-29 12:20:20.371049832 +0000 UTC m=+5917.588380966" lastFinishedPulling="2026-01-29 12:21:01.312349171 +0000 UTC m=+5958.529680325" observedRunningTime="2026-01-29 12:21:03.424915568 +0000 UTC m=+5960.642246712" watchObservedRunningTime="2026-01-29 12:21:03.435043195 +0000 UTC m=+5960.652374339" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.657212 4852 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/octavia-image-upload-59f8cff499-7s2p8"] Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.659276 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.662619 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.670004 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7s2p8"] Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.738337 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-amphora-image\") pod \"octavia-image-upload-59f8cff499-7s2p8\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.738452 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-httpd-config\") pod \"octavia-image-upload-59f8cff499-7s2p8\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.853737 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-amphora-image\") pod \"octavia-image-upload-59f8cff499-7s2p8\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.853841 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-httpd-config\") pod \"octavia-image-upload-59f8cff499-7s2p8\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.855016 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-amphora-image\") pod \"octavia-image-upload-59f8cff499-7s2p8\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.866648 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-httpd-config\") pod \"octavia-image-upload-59f8cff499-7s2p8\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:03 crc kubenswrapper[4852]: I0129 12:21:03.998122 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:21:04 crc kubenswrapper[4852]: I0129 12:21:04.009737 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-xw2qz"] Jan 29 12:21:04 crc kubenswrapper[4852]: I0129 12:21:04.078427 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-xw2qz"] Jan 29 12:21:04 crc kubenswrapper[4852]: I0129 12:21:04.409993 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-xw2qz" event={"ID":"65be4657-225c-4b92-b227-cabe5bf069e2","Type":"ContainerStarted","Data":"a246c102dd8e70b70a66cca060c0619c7701c155c88629ac6a1f439f9962c958"} Jan 29 12:21:04 crc kubenswrapper[4852]: I0129 12:21:04.468851 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:21:04 crc kubenswrapper[4852]: E0129 12:21:04.469058 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:21:04 crc kubenswrapper[4852]: I0129 12:21:04.471799 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7s2p8"] Jan 29 12:21:05 crc kubenswrapper[4852]: I0129 12:21:05.425382 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" event={"ID":"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0","Type":"ContainerStarted","Data":"1203c6fd2a39a12626698313157f55610e331c9da495e6c60edbd0497fdcaabf"} Jan 29 12:21:07 crc kubenswrapper[4852]: I0129 12:21:07.447503 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-xw2qz" event={"ID":"65be4657-225c-4b92-b227-cabe5bf069e2","Type":"ContainerStarted","Data":"730c30d270f3f976769be0b19cb7db39e40790d2ce9b73e3e83b4eeaa8e9eba7"} Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.206360 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-wpt84"] Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.208136 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.211346 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.222325 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-wpt84"] Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.253666 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-scripts\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.253752 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data-merged\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.253897 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-combined-ca-bundle\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.253930 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.356323 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data-merged\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.356565 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-combined-ca-bundle\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.356634 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.356779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-scripts\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.357042 4852 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data-merged\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.363538 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.364112 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-scripts\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.377694 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-combined-ca-bundle\") pod \"octavia-db-sync-wpt84\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:08 crc kubenswrapper[4852]: I0129 12:21:08.537872 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:10 crc kubenswrapper[4852]: I0129 12:21:10.494185 4852 generic.go:334] "Generic (PLEG): container finished" podID="65be4657-225c-4b92-b227-cabe5bf069e2" containerID="730c30d270f3f976769be0b19cb7db39e40790d2ce9b73e3e83b4eeaa8e9eba7" exitCode=0 Jan 29 12:21:10 crc kubenswrapper[4852]: I0129 12:21:10.494495 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-xw2qz" event={"ID":"65be4657-225c-4b92-b227-cabe5bf069e2","Type":"ContainerDied","Data":"730c30d270f3f976769be0b19cb7db39e40790d2ce9b73e3e83b4eeaa8e9eba7"} Jan 29 12:21:10 crc kubenswrapper[4852]: I0129 12:21:10.712739 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-wpt84"] Jan 29 12:21:11 crc kubenswrapper[4852]: I0129 12:21:11.506557 4852 generic.go:334] "Generic (PLEG): container finished" podID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerID="356a7e6bdb87dfe2fe678278c7c7e143d9514714440aaf3d981f8fd2c8a9d0af" exitCode=0 Jan 29 12:21:11 crc kubenswrapper[4852]: I0129 12:21:11.506625 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-wpt84" event={"ID":"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006","Type":"ContainerDied","Data":"356a7e6bdb87dfe2fe678278c7c7e143d9514714440aaf3d981f8fd2c8a9d0af"} Jan 29 12:21:11 crc kubenswrapper[4852]: I0129 12:21:11.507154 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-wpt84" event={"ID":"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006","Type":"ContainerStarted","Data":"5f7e0038a3b6664d26b734853ec43233932edb14c7c0e287456a76f475607388"} Jan 29 12:21:17 crc kubenswrapper[4852]: I0129 12:21:17.463733 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:21:17 crc kubenswrapper[4852]: E0129 12:21:17.464768 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:21:20 crc kubenswrapper[4852]: I0129 12:21:20.590823 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" event={"ID":"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0","Type":"ContainerStarted","Data":"d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0"} Jan 29 12:21:20 crc kubenswrapper[4852]: I0129 12:21:20.595919 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-wpt84" event={"ID":"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006","Type":"ContainerStarted","Data":"d41c63a1c4f493cef337de90e1226c40bc0d79445712f98cd191c43ad5b03ade"} Jan 29 12:21:20 crc kubenswrapper[4852]: I0129 12:21:20.598387 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-xw2qz" event={"ID":"65be4657-225c-4b92-b227-cabe5bf069e2","Type":"ContainerStarted","Data":"396895d236356bae19bfb37d320b1f8902baf05606fd29d2ae9926c43a5e963e"} Jan 29 12:21:20 crc kubenswrapper[4852]: I0129 12:21:20.599149 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:21:20 crc kubenswrapper[4852]: I0129 12:21:20.637269 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-xw2qz" podStartSLOduration=2.457510268 podStartE2EDuration="18.637246102s" podCreationTimestamp="2026-01-29 12:21:02 +0000 UTC" firstStartedPulling="2026-01-29 12:21:04.041382111 +0000 UTC m=+5961.258713245" lastFinishedPulling="2026-01-29 12:21:20.221117935 +0000 UTC m=+5977.438449079" observedRunningTime="2026-01-29 12:21:20.630044536 +0000 UTC m=+5977.847375690" watchObservedRunningTime="2026-01-29 12:21:20.637246102 +0000 UTC m=+5977.854577246" Jan 29 12:21:20 crc kubenswrapper[4852]: I0129 12:21:20.650005 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-wpt84" podStartSLOduration=12.649982362 podStartE2EDuration="12.649982362s" podCreationTimestamp="2026-01-29 12:21:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:21:20.648448985 +0000 UTC m=+5977.865780119" watchObservedRunningTime="2026-01-29 12:21:20.649982362 +0000 UTC m=+5977.867313496" Jan 29 12:21:22 crc kubenswrapper[4852]: I0129 12:21:22.624468 4852 generic.go:334] "Generic (PLEG): container finished" podID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerID="d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0" exitCode=0 Jan 29 12:21:22 crc kubenswrapper[4852]: I0129 12:21:22.625349 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" event={"ID":"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0","Type":"ContainerDied","Data":"d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0"} Jan 29 12:21:25 crc kubenswrapper[4852]: I0129 12:21:25.609495 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:21:25 crc kubenswrapper[4852]: I0129 12:21:25.612110 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-7679fffcf8-j62tz" Jan 29 12:21:25 crc kubenswrapper[4852]: I0129 12:21:25.657406 4852 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" event={"ID":"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0","Type":"ContainerStarted","Data":"54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee"} Jan 29 12:21:25 crc kubenswrapper[4852]: I0129 12:21:25.719858 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" podStartSLOduration=2.173518368 podStartE2EDuration="22.719833034s" podCreationTimestamp="2026-01-29 12:21:03 +0000 UTC" firstStartedPulling="2026-01-29 12:21:04.472689078 +0000 UTC m=+5961.690020212" lastFinishedPulling="2026-01-29 12:21:25.019003734 +0000 UTC m=+5982.236334878" observedRunningTime="2026-01-29 12:21:25.696996247 +0000 UTC m=+5982.914327371" watchObservedRunningTime="2026-01-29 12:21:25.719833034 +0000 UTC m=+5982.937164158" Jan 29 12:21:28 crc kubenswrapper[4852]: I0129 12:21:28.682930 4852 generic.go:334] "Generic (PLEG): container finished" podID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerID="d41c63a1c4f493cef337de90e1226c40bc0d79445712f98cd191c43ad5b03ade" exitCode=0 Jan 29 12:21:28 crc kubenswrapper[4852]: I0129 12:21:28.683013 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-wpt84" event={"ID":"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006","Type":"ContainerDied","Data":"d41c63a1c4f493cef337de90e1226c40bc0d79445712f98cd191c43ad5b03ade"} Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.137433 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.218982 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data-merged\") pod \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.219028 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data\") pod \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.219110 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-scripts\") pod \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.219134 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-combined-ca-bundle\") pod \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\" (UID: \"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006\") " Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.229717 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-scripts" (OuterVolumeSpecName: "scripts") pod "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" (UID: "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.232609 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data" (OuterVolumeSpecName: "config-data") pod "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" (UID: "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.249789 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" (UID: "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.259383 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" (UID: "b4a5ba20-d1dd-44c5-8d54-ef56e18c5006"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.326211 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.326249 4852 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-config-data-merged\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.326266 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.326278 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.464216 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.709183 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-wpt84" event={"ID":"b4a5ba20-d1dd-44c5-8d54-ef56e18c5006","Type":"ContainerDied","Data":"5f7e0038a3b6664d26b734853ec43233932edb14c7c0e287456a76f475607388"} Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.709670 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f7e0038a3b6664d26b734853ec43233932edb14c7c0e287456a76f475607388" Jan 29 12:21:30 crc kubenswrapper[4852]: I0129 12:21:30.709256 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-wpt84" Jan 29 12:21:31 crc kubenswrapper[4852]: I0129 12:21:31.721345 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"e89b72b5258ba940c66e1c527c76cdcb417906e73db974c3fcb99b3d86470719"} Jan 29 12:21:33 crc kubenswrapper[4852]: I0129 12:21:33.387093 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-xw2qz" Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.011097 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7s2p8"] Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.013336 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerName="octavia-amphora-httpd" containerID="cri-o://54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee" gracePeriod=30 Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.677225 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.764573 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-httpd-config\") pod \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.764688 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-amphora-image\") pod \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\" (UID: \"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0\") " Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.797895 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" (UID: "457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.867188 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" (UID: "457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0"). InnerVolumeSpecName "amphora-image". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.867532 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:01 crc kubenswrapper[4852]: I0129 12:22:01.867566 4852 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0-amphora-image\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.034957 4852 generic.go:334] "Generic (PLEG): container finished" podID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerID="54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee" exitCode=0 Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.035028 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.035069 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" event={"ID":"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0","Type":"ContainerDied","Data":"54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee"} Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.035455 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7s2p8" event={"ID":"457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0","Type":"ContainerDied","Data":"1203c6fd2a39a12626698313157f55610e331c9da495e6c60edbd0497fdcaabf"} Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.035478 4852 scope.go:117] "RemoveContainer" containerID="54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.067071 4852 scope.go:117] "RemoveContainer" containerID="d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.075673 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7s2p8"] Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.084816 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7s2p8"] Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.089110 4852 scope.go:117] "RemoveContainer" containerID="54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee" Jan 29 12:22:02 crc kubenswrapper[4852]: E0129 12:22:02.089850 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee\": container with ID starting with 54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee not found: ID does not exist" containerID="54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.089892 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee"} err="failed to get container status \"54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee\": rpc error: code = NotFound desc = could not find container \"54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee\": container with ID starting with 
54918b38bd7415c3a8e7aa767330379905c25a73a53da19717e741ed629477ee not found: ID does not exist" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.089919 4852 scope.go:117] "RemoveContainer" containerID="d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0" Jan 29 12:22:02 crc kubenswrapper[4852]: E0129 12:22:02.090417 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0\": container with ID starting with d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0 not found: ID does not exist" containerID="d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.090438 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0"} err="failed to get container status \"d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0\": rpc error: code = NotFound desc = could not find container \"d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0\": container with ID starting with d223ae0122925320a96422828f1d928cab97f511ad94dfa5e3e20f6e3a2180b0 not found: ID does not exist" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.169474 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w6gf4"] Jan 29 12:22:02 crc kubenswrapper[4852]: E0129 12:22:02.170261 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerName="init" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.170284 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerName="init" Jan 29 12:22:02 crc kubenswrapper[4852]: E0129 12:22:02.170314 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerName="octavia-amphora-httpd" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.170323 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerName="octavia-amphora-httpd" Jan 29 12:22:02 crc kubenswrapper[4852]: E0129 12:22:02.170354 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerName="init" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.170364 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerName="init" Jan 29 12:22:02 crc kubenswrapper[4852]: E0129 12:22:02.170377 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerName="octavia-db-sync" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.170384 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerName="octavia-db-sync" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.170631 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" containerName="octavia-amphora-httpd" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.170658 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" containerName="octavia-db-sync" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.174658 4852 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.200022 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6gf4"] Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.278615 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-catalog-content\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.278696 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgwdr\" (UniqueName: \"kubernetes.io/projected/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-kube-api-access-dgwdr\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.278750 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-utilities\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.380308 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-catalog-content\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.380371 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgwdr\" (UniqueName: \"kubernetes.io/projected/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-kube-api-access-dgwdr\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.380412 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-utilities\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.380898 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-utilities\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.380904 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-catalog-content\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.411233 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgwdr\" (UniqueName: \"kubernetes.io/projected/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-kube-api-access-dgwdr\") pod \"redhat-marketplace-w6gf4\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:02 crc kubenswrapper[4852]: I0129 12:22:02.528071 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:03 crc kubenswrapper[4852]: I0129 12:22:03.005843 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6gf4"] Jan 29 12:22:03 crc kubenswrapper[4852]: I0129 12:22:03.064750 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6gf4" event={"ID":"0b720c39-ab50-49f9-a8c0-d3a3c5502c85","Type":"ContainerStarted","Data":"a5174877cf80bde1181ff7f9665c0b235ab35bfd23b02856c645789f4433d148"} Jan 29 12:22:03 crc kubenswrapper[4852]: I0129 12:22:03.475512 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0" path="/var/lib/kubelet/pods/457fa9d8-5a5c-48f5-b2a1-6329d8f16cb0/volumes" Jan 29 12:22:04 crc kubenswrapper[4852]: I0129 12:22:04.076828 4852 generic.go:334] "Generic (PLEG): container finished" podID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerID="e9ddaa8939c302e5cb4bdd0b74eb79e4e9c899699cd351485159fbf60fdfd743" exitCode=0 Jan 29 12:22:04 crc kubenswrapper[4852]: I0129 12:22:04.077848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6gf4" event={"ID":"0b720c39-ab50-49f9-a8c0-d3a3c5502c85","Type":"ContainerDied","Data":"e9ddaa8939c302e5cb4bdd0b74eb79e4e9c899699cd351485159fbf60fdfd743"} Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.102949 4852 generic.go:334] "Generic (PLEG): container finished" podID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerID="63c6c69cf0128711da74e0cffdb20275dc4f6db7f4fab4c8280eedb8f0676721" exitCode=0 Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.103153 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6gf4" event={"ID":"0b720c39-ab50-49f9-a8c0-d3a3c5502c85","Type":"ContainerDied","Data":"63c6c69cf0128711da74e0cffdb20275dc4f6db7f4fab4c8280eedb8f0676721"} Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.380475 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-qw2nx"] Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.382267 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.387571 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.402477 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-qw2nx"] Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.459691 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/19b0136d-e079-4cce-95ce-530955f5929a-amphora-image\") pod \"octavia-image-upload-59f8cff499-qw2nx\" (UID: \"19b0136d-e079-4cce-95ce-530955f5929a\") " pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.460153 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/19b0136d-e079-4cce-95ce-530955f5929a-httpd-config\") pod \"octavia-image-upload-59f8cff499-qw2nx\" (UID: \"19b0136d-e079-4cce-95ce-530955f5929a\") " pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.562386 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/19b0136d-e079-4cce-95ce-530955f5929a-httpd-config\") pod \"octavia-image-upload-59f8cff499-qw2nx\" (UID: \"19b0136d-e079-4cce-95ce-530955f5929a\") " pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.562523 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/19b0136d-e079-4cce-95ce-530955f5929a-amphora-image\") pod \"octavia-image-upload-59f8cff499-qw2nx\" (UID: \"19b0136d-e079-4cce-95ce-530955f5929a\") " pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.563780 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/19b0136d-e079-4cce-95ce-530955f5929a-amphora-image\") pod \"octavia-image-upload-59f8cff499-qw2nx\" (UID: \"19b0136d-e079-4cce-95ce-530955f5929a\") " pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.568942 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/19b0136d-e079-4cce-95ce-530955f5929a-httpd-config\") pod \"octavia-image-upload-59f8cff499-qw2nx\" (UID: \"19b0136d-e079-4cce-95ce-530955f5929a\") " pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:06 crc kubenswrapper[4852]: I0129 12:22:06.698367 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.123788 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-hchhs"] Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.126687 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.131661 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.131815 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.132174 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.135358 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-hchhs"] Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.182209 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-config-data\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.182254 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0334a3c1-e942-474f-8d03-eb17b89a609f-config-data-merged\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.182339 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/0334a3c1-e942-474f-8d03-eb17b89a609f-hm-ports\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.182357 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-scripts\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.182387 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-combined-ca-bundle\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.182409 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-amphora-certs\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.197152 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-qw2nx"] Jan 29 12:22:07 crc kubenswrapper[4852]: W0129 12:22:07.226400 4852 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19b0136d_e079_4cce_95ce_530955f5929a.slice/crio-9bb102f92f3582e264bc45e322ce3d2f46e6f8fb6e41f04da55329446a9229dc WatchSource:0}: Error finding container 9bb102f92f3582e264bc45e322ce3d2f46e6f8fb6e41f04da55329446a9229dc: Status 404 returned error can't find the container with id 9bb102f92f3582e264bc45e322ce3d2f46e6f8fb6e41f04da55329446a9229dc Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.284936 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/0334a3c1-e942-474f-8d03-eb17b89a609f-hm-ports\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.285361 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-scripts\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.285449 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-combined-ca-bundle\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.285505 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-amphora-certs\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.285734 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-config-data\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.285802 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0334a3c1-e942-474f-8d03-eb17b89a609f-config-data-merged\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.286700 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0334a3c1-e942-474f-8d03-eb17b89a609f-config-data-merged\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.288385 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/0334a3c1-e942-474f-8d03-eb17b89a609f-hm-ports\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.297679 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-amphora-certs\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.298044 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-scripts\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.299120 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-combined-ca-bundle\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.304285 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0334a3c1-e942-474f-8d03-eb17b89a609f-config-data\") pod \"octavia-healthmanager-hchhs\" (UID: \"0334a3c1-e942-474f-8d03-eb17b89a609f\") " pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:07 crc kubenswrapper[4852]: I0129 12:22:07.461237 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:08 crc kubenswrapper[4852]: I0129 12:22:08.139218 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" event={"ID":"19b0136d-e079-4cce-95ce-530955f5929a","Type":"ContainerStarted","Data":"7c3cd558e29ad684764f4ebd39b63f5dcfcd189cf66a205c338252a52367b45a"} Jan 29 12:22:08 crc kubenswrapper[4852]: I0129 12:22:08.140840 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" event={"ID":"19b0136d-e079-4cce-95ce-530955f5929a","Type":"ContainerStarted","Data":"9bb102f92f3582e264bc45e322ce3d2f46e6f8fb6e41f04da55329446a9229dc"} Jan 29 12:22:08 crc kubenswrapper[4852]: I0129 12:22:08.144892 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6gf4" event={"ID":"0b720c39-ab50-49f9-a8c0-d3a3c5502c85","Type":"ContainerStarted","Data":"64c7c03d925c008c42d2d1d4fdb67b188c4480a476ec31d828dd755e6fe1aaa0"} Jan 29 12:22:08 crc kubenswrapper[4852]: I0129 12:22:08.185531 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-w6gf4" podStartSLOduration=3.202539715 podStartE2EDuration="6.185512847s" podCreationTimestamp="2026-01-29 12:22:02 +0000 UTC" firstStartedPulling="2026-01-29 12:22:04.08036861 +0000 UTC m=+6021.297699744" lastFinishedPulling="2026-01-29 12:22:07.063341732 +0000 UTC m=+6024.280672876" observedRunningTime="2026-01-29 12:22:08.17987441 +0000 UTC m=+6025.397205544" watchObservedRunningTime="2026-01-29 12:22:08.185512847 +0000 UTC m=+6025.402843981" Jan 29 12:22:08 crc kubenswrapper[4852]: I0129 12:22:08.238577 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-hchhs"] Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.157144 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-hchhs" 
event={"ID":"0334a3c1-e942-474f-8d03-eb17b89a609f","Type":"ContainerStarted","Data":"9370299230c8b7af3032eee101fb3388c688e9f837925a4952760e2160a337c9"} Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.157496 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-hchhs" event={"ID":"0334a3c1-e942-474f-8d03-eb17b89a609f","Type":"ContainerStarted","Data":"05eb1ae561f0fa3f2d2cea68fb7597d4a4784151c36da2093592fc9b675ebb76"} Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.161484 4852 generic.go:334] "Generic (PLEG): container finished" podID="19b0136d-e079-4cce-95ce-530955f5929a" containerID="7c3cd558e29ad684764f4ebd39b63f5dcfcd189cf66a205c338252a52367b45a" exitCode=0 Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.161750 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" event={"ID":"19b0136d-e079-4cce-95ce-530955f5929a","Type":"ContainerDied","Data":"7c3cd558e29ad684764f4ebd39b63f5dcfcd189cf66a205c338252a52367b45a"} Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.334049 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-gqwgh"] Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.335961 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.338988 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.339425 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.342470 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-gqwgh"] Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.427966 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-amphora-certs\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.428062 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/531f02fa-337f-4e2a-a7fd-6877aacd308d-config-data-merged\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.428194 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-scripts\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.428384 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-combined-ca-bundle\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 
12:22:09.428447 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/531f02fa-337f-4e2a-a7fd-6877aacd308d-hm-ports\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.428545 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-config-data\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.530092 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-combined-ca-bundle\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.530150 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/531f02fa-337f-4e2a-a7fd-6877aacd308d-hm-ports\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.530236 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-config-data\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.530313 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-amphora-certs\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.530377 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/531f02fa-337f-4e2a-a7fd-6877aacd308d-config-data-merged\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.530413 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-scripts\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.531684 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/531f02fa-337f-4e2a-a7fd-6877aacd308d-config-data-merged\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.531844 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" 
(UniqueName: \"kubernetes.io/configmap/531f02fa-337f-4e2a-a7fd-6877aacd308d-hm-ports\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.537305 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-combined-ca-bundle\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.537323 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-scripts\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.538606 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-amphora-certs\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.539888 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/531f02fa-337f-4e2a-a7fd-6877aacd308d-config-data\") pod \"octavia-housekeeping-gqwgh\" (UID: \"531f02fa-337f-4e2a-a7fd-6877aacd308d\") " pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:09 crc kubenswrapper[4852]: I0129 12:22:09.681060 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:10 crc kubenswrapper[4852]: I0129 12:22:10.351053 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-gqwgh"] Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.193574 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-gqwgh" event={"ID":"531f02fa-337f-4e2a-a7fd-6877aacd308d","Type":"ContainerStarted","Data":"e7631b5999e5d2655a12b1a42b9dc5af2925617ba0569010b82fde9136f79120"} Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.535226 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-zwdbx"] Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.537189 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.541245 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.541364 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.581735 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-zwdbx"] Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.677325 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-amphora-certs\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.677384 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-config-data\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.677532 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-scripts\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.677669 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-combined-ca-bundle\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.677861 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/45059069-1ab9-4564-b135-7c9720565139-hm-ports\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.678081 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/45059069-1ab9-4564-b135-7c9720565139-config-data-merged\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.779630 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-amphora-certs\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.779710 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-config-data\") pod \"octavia-worker-zwdbx\" (UID: 
\"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.779760 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-scripts\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.779824 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-combined-ca-bundle\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.779897 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/45059069-1ab9-4564-b135-7c9720565139-hm-ports\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.779981 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/45059069-1ab9-4564-b135-7c9720565139-config-data-merged\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.780450 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/45059069-1ab9-4564-b135-7c9720565139-config-data-merged\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.780887 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/45059069-1ab9-4564-b135-7c9720565139-hm-ports\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.791299 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-combined-ca-bundle\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.794404 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-config-data\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.804330 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-scripts\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.805543 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: 
\"kubernetes.io/secret/45059069-1ab9-4564-b135-7c9720565139-amphora-certs\") pod \"octavia-worker-zwdbx\" (UID: \"45059069-1ab9-4564-b135-7c9720565139\") " pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:11 crc kubenswrapper[4852]: I0129 12:22:11.883036 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:12 crc kubenswrapper[4852]: I0129 12:22:12.208879 4852 generic.go:334] "Generic (PLEG): container finished" podID="0334a3c1-e942-474f-8d03-eb17b89a609f" containerID="9370299230c8b7af3032eee101fb3388c688e9f837925a4952760e2160a337c9" exitCode=0 Jan 29 12:22:12 crc kubenswrapper[4852]: I0129 12:22:12.209140 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-hchhs" event={"ID":"0334a3c1-e942-474f-8d03-eb17b89a609f","Type":"ContainerDied","Data":"9370299230c8b7af3032eee101fb3388c688e9f837925a4952760e2160a337c9"} Jan 29 12:22:12 crc kubenswrapper[4852]: I0129 12:22:12.528807 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:12 crc kubenswrapper[4852]: I0129 12:22:12.529030 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:12 crc kubenswrapper[4852]: I0129 12:22:12.587927 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:12 crc kubenswrapper[4852]: I0129 12:22:12.622606 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-zwdbx"] Jan 29 12:22:13 crc kubenswrapper[4852]: I0129 12:22:13.222044 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-zwdbx" event={"ID":"45059069-1ab9-4564-b135-7c9720565139","Type":"ContainerStarted","Data":"c9ace5a83dd0c51c4742b8060804a072c1b632c2da2b913d51d2584d1a6eeaaa"} Jan 29 12:22:13 crc kubenswrapper[4852]: I0129 12:22:13.242506 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" event={"ID":"19b0136d-e079-4cce-95ce-530955f5929a","Type":"ContainerStarted","Data":"9103457008c77449b96f0aafa8b758cc6692f160923165e195754ed591dc5280"} Jan 29 12:22:13 crc kubenswrapper[4852]: I0129 12:22:13.250556 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-hchhs"] Jan 29 12:22:13 crc kubenswrapper[4852]: I0129 12:22:13.273791 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-qw2nx" podStartSLOduration=1.729159989 podStartE2EDuration="7.273772847s" podCreationTimestamp="2026-01-29 12:22:06 +0000 UTC" firstStartedPulling="2026-01-29 12:22:07.229171616 +0000 UTC m=+6024.446502750" lastFinishedPulling="2026-01-29 12:22:12.773784474 +0000 UTC m=+6029.991115608" observedRunningTime="2026-01-29 12:22:13.259252432 +0000 UTC m=+6030.476583576" watchObservedRunningTime="2026-01-29 12:22:13.273772847 +0000 UTC m=+6030.491103981" Jan 29 12:22:13 crc kubenswrapper[4852]: I0129 12:22:13.332718 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:13 crc kubenswrapper[4852]: I0129 12:22:13.378988 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6gf4"] Jan 29 12:22:14 crc kubenswrapper[4852]: I0129 12:22:14.260193 4852 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/octavia-housekeeping-gqwgh" event={"ID":"531f02fa-337f-4e2a-a7fd-6877aacd308d","Type":"ContainerStarted","Data":"530a09ae0a2ee16253c45406c6fe6c88335614b958c74a8a3ed9a64d4495b6ba"} Jan 29 12:22:14 crc kubenswrapper[4852]: I0129 12:22:14.264724 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-hchhs" event={"ID":"0334a3c1-e942-474f-8d03-eb17b89a609f","Type":"ContainerStarted","Data":"d2c83b86d1bac03265de64e4da37cf136c8245fac1a8c146ca57c16a02f48f5e"} Jan 29 12:22:14 crc kubenswrapper[4852]: I0129 12:22:14.264873 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:14 crc kubenswrapper[4852]: I0129 12:22:14.308409 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-hchhs" podStartSLOduration=7.308387807 podStartE2EDuration="7.308387807s" podCreationTimestamp="2026-01-29 12:22:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:22:14.29664909 +0000 UTC m=+6031.513980224" watchObservedRunningTime="2026-01-29 12:22:14.308387807 +0000 UTC m=+6031.525718941" Jan 29 12:22:15 crc kubenswrapper[4852]: I0129 12:22:15.280483 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-w6gf4" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="registry-server" containerID="cri-o://64c7c03d925c008c42d2d1d4fdb67b188c4480a476ec31d828dd755e6fe1aaa0" gracePeriod=2 Jan 29 12:22:15 crc kubenswrapper[4852]: E0129 12:22:15.719190 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b720c39_ab50_49f9_a8c0_d3a3c5502c85.slice/crio-64c7c03d925c008c42d2d1d4fdb67b188c4480a476ec31d828dd755e6fe1aaa0.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:22:16 crc kubenswrapper[4852]: I0129 12:22:16.052208 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-xft2d"] Jan 29 12:22:16 crc kubenswrapper[4852]: I0129 12:22:16.062349 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-2184-account-create-update-cz5tw"] Jan 29 12:22:16 crc kubenswrapper[4852]: I0129 12:22:16.071070 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-2184-account-create-update-cz5tw"] Jan 29 12:22:16 crc kubenswrapper[4852]: I0129 12:22:16.083824 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-xft2d"] Jan 29 12:22:16 crc kubenswrapper[4852]: I0129 12:22:16.292855 4852 generic.go:334] "Generic (PLEG): container finished" podID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerID="64c7c03d925c008c42d2d1d4fdb67b188c4480a476ec31d828dd755e6fe1aaa0" exitCode=0 Jan 29 12:22:16 crc kubenswrapper[4852]: I0129 12:22:16.292904 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6gf4" event={"ID":"0b720c39-ab50-49f9-a8c0-d3a3c5502c85","Type":"ContainerDied","Data":"64c7c03d925c008c42d2d1d4fdb67b188c4480a476ec31d828dd755e6fe1aaa0"} Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.403424 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.480457 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55e2b822-3c56-4b25-ab41-83c6c5de861f" path="/var/lib/kubelet/pods/55e2b822-3c56-4b25-ab41-83c6c5de861f/volumes" Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.481367 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b4707bf-89be-43fd-a926-b3a96b4b2e74" path="/var/lib/kubelet/pods/9b4707bf-89be-43fd-a926-b3a96b4b2e74/volumes" Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.507970 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-catalog-content\") pod \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.508060 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-utilities\") pod \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.508081 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgwdr\" (UniqueName: \"kubernetes.io/projected/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-kube-api-access-dgwdr\") pod \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\" (UID: \"0b720c39-ab50-49f9-a8c0-d3a3c5502c85\") " Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.512421 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-utilities" (OuterVolumeSpecName: "utilities") pod "0b720c39-ab50-49f9-a8c0-d3a3c5502c85" (UID: "0b720c39-ab50-49f9-a8c0-d3a3c5502c85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.516853 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-kube-api-access-dgwdr" (OuterVolumeSpecName: "kube-api-access-dgwdr") pod "0b720c39-ab50-49f9-a8c0-d3a3c5502c85" (UID: "0b720c39-ab50-49f9-a8c0-d3a3c5502c85"). InnerVolumeSpecName "kube-api-access-dgwdr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.611155 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:17 crc kubenswrapper[4852]: I0129 12:22:17.611190 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgwdr\" (UniqueName: \"kubernetes.io/projected/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-kube-api-access-dgwdr\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.317394 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6gf4" event={"ID":"0b720c39-ab50-49f9-a8c0-d3a3c5502c85","Type":"ContainerDied","Data":"a5174877cf80bde1181ff7f9665c0b235ab35bfd23b02856c645789f4433d148"} Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.317466 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6gf4" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.317837 4852 scope.go:117] "RemoveContainer" containerID="64c7c03d925c008c42d2d1d4fdb67b188c4480a476ec31d828dd755e6fe1aaa0" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.363824 4852 scope.go:117] "RemoveContainer" containerID="63c6c69cf0128711da74e0cffdb20275dc4f6db7f4fab4c8280eedb8f0676721" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.405447 4852 scope.go:117] "RemoveContainer" containerID="e9ddaa8939c302e5cb4bdd0b74eb79e4e9c899699cd351485159fbf60fdfd743" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.916844 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b720c39-ab50-49f9-a8c0-d3a3c5502c85" (UID: "0b720c39-ab50-49f9-a8c0-d3a3c5502c85"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.940122 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b720c39-ab50-49f9-a8c0-d3a3c5502c85-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.956372 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6gf4"] Jan 29 12:22:18 crc kubenswrapper[4852]: I0129 12:22:18.972443 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6gf4"] Jan 29 12:22:19 crc kubenswrapper[4852]: I0129 12:22:19.331461 4852 generic.go:334] "Generic (PLEG): container finished" podID="531f02fa-337f-4e2a-a7fd-6877aacd308d" containerID="530a09ae0a2ee16253c45406c6fe6c88335614b958c74a8a3ed9a64d4495b6ba" exitCode=0 Jan 29 12:22:19 crc kubenswrapper[4852]: I0129 12:22:19.331615 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-gqwgh" event={"ID":"531f02fa-337f-4e2a-a7fd-6877aacd308d","Type":"ContainerDied","Data":"530a09ae0a2ee16253c45406c6fe6c88335614b958c74a8a3ed9a64d4495b6ba"} Jan 29 12:22:19 crc kubenswrapper[4852]: I0129 12:22:19.505563 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" path="/var/lib/kubelet/pods/0b720c39-ab50-49f9-a8c0-d3a3c5502c85/volumes" Jan 29 12:22:20 crc kubenswrapper[4852]: I0129 12:22:20.347767 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-zwdbx" event={"ID":"45059069-1ab9-4564-b135-7c9720565139","Type":"ContainerStarted","Data":"e143465994491ae1de22777c91fdf96523dc01c81d0ce878717c803e52f68767"} Jan 29 12:22:20 crc kubenswrapper[4852]: I0129 12:22:20.351423 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-gqwgh" event={"ID":"531f02fa-337f-4e2a-a7fd-6877aacd308d","Type":"ContainerStarted","Data":"f230fa485549b263524ce79c317448d4571257592d3bc9bb351fae14a0d97fa6"} Jan 29 12:22:20 crc kubenswrapper[4852]: I0129 12:22:20.352001 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:20 crc kubenswrapper[4852]: I0129 12:22:20.399613 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-gqwgh" podStartSLOduration=8.70641555 podStartE2EDuration="11.399574384s" 
podCreationTimestamp="2026-01-29 12:22:09 +0000 UTC" firstStartedPulling="2026-01-29 12:22:10.368149032 +0000 UTC m=+6027.585480166" lastFinishedPulling="2026-01-29 12:22:13.061307866 +0000 UTC m=+6030.278639000" observedRunningTime="2026-01-29 12:22:20.393164298 +0000 UTC m=+6037.610495452" watchObservedRunningTime="2026-01-29 12:22:20.399574384 +0000 UTC m=+6037.616905518" Jan 29 12:22:21 crc kubenswrapper[4852]: I0129 12:22:21.032462 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-5bqts"] Jan 29 12:22:21 crc kubenswrapper[4852]: I0129 12:22:21.043500 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-5bqts"] Jan 29 12:22:21 crc kubenswrapper[4852]: I0129 12:22:21.378738 4852 generic.go:334] "Generic (PLEG): container finished" podID="45059069-1ab9-4564-b135-7c9720565139" containerID="e143465994491ae1de22777c91fdf96523dc01c81d0ce878717c803e52f68767" exitCode=0 Jan 29 12:22:21 crc kubenswrapper[4852]: I0129 12:22:21.378823 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-zwdbx" event={"ID":"45059069-1ab9-4564-b135-7c9720565139","Type":"ContainerDied","Data":"e143465994491ae1de22777c91fdf96523dc01c81d0ce878717c803e52f68767"} Jan 29 12:22:21 crc kubenswrapper[4852]: I0129 12:22:21.489254 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc7a2f32-b05e-40f8-ac12-2af1f05a5578" path="/var/lib/kubelet/pods/cc7a2f32-b05e-40f8-ac12-2af1f05a5578/volumes" Jan 29 12:22:22 crc kubenswrapper[4852]: I0129 12:22:22.402644 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-zwdbx" event={"ID":"45059069-1ab9-4564-b135-7c9720565139","Type":"ContainerStarted","Data":"5e719f14a34b0fecefa5a78d1bf2e73ab00f12274e719c736d9f275ff4dfd420"} Jan 29 12:22:22 crc kubenswrapper[4852]: I0129 12:22:22.403506 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:22 crc kubenswrapper[4852]: I0129 12:22:22.502654 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-hchhs" Jan 29 12:22:22 crc kubenswrapper[4852]: I0129 12:22:22.529325 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-zwdbx" podStartSLOduration=4.864224467 podStartE2EDuration="11.529302739s" podCreationTimestamp="2026-01-29 12:22:11 +0000 UTC" firstStartedPulling="2026-01-29 12:22:12.772297258 +0000 UTC m=+6029.989628392" lastFinishedPulling="2026-01-29 12:22:19.43737553 +0000 UTC m=+6036.654706664" observedRunningTime="2026-01-29 12:22:22.432216952 +0000 UTC m=+6039.649548086" watchObservedRunningTime="2026-01-29 12:22:22.529302739 +0000 UTC m=+6039.746633873" Jan 29 12:22:26 crc kubenswrapper[4852]: I0129 12:22:26.928280 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-zwdbx" Jan 29 12:22:39 crc kubenswrapper[4852]: I0129 12:22:39.730259 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-gqwgh" Jan 29 12:22:50 crc kubenswrapper[4852]: I0129 12:22:50.063033 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-3e22-account-create-update-6xdwl"] Jan 29 12:22:50 crc kubenswrapper[4852]: I0129 12:22:50.079728 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-27mtn"] Jan 29 12:22:50 crc kubenswrapper[4852]: I0129 12:22:50.091400 4852 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-27mtn"] Jan 29 12:22:50 crc kubenswrapper[4852]: I0129 12:22:50.105771 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-3e22-account-create-update-6xdwl"] Jan 29 12:22:51 crc kubenswrapper[4852]: I0129 12:22:51.034276 4852 scope.go:117] "RemoveContainer" containerID="94920d60be493663535dabd75b56284e4f46e8aee5e1fe34ebbca489e86c5542" Jan 29 12:22:51 crc kubenswrapper[4852]: I0129 12:22:51.482719 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ac2a1fa-4357-4674-92d2-2751732311a2" path="/var/lib/kubelet/pods/0ac2a1fa-4357-4674-92d2-2751732311a2/volumes" Jan 29 12:22:51 crc kubenswrapper[4852]: I0129 12:22:51.483948 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5" path="/var/lib/kubelet/pods/7ddaf8fe-ecdb-47c0-9d25-2bef85eb9ab5/volumes" Jan 29 12:22:52 crc kubenswrapper[4852]: I0129 12:22:52.554274 4852 scope.go:117] "RemoveContainer" containerID="321c1d8766955206df377411efc213c93d8bb42df8b541832488fdd98016f8c4" Jan 29 12:22:52 crc kubenswrapper[4852]: I0129 12:22:52.606836 4852 scope.go:117] "RemoveContainer" containerID="baf63117abd86389987f03944932929afb47d30abb61d68c6412aa66584f4e24" Jan 29 12:23:01 crc kubenswrapper[4852]: I0129 12:23:01.030747 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-9gsbw"] Jan 29 12:23:01 crc kubenswrapper[4852]: I0129 12:23:01.039222 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-9gsbw"] Jan 29 12:23:01 crc kubenswrapper[4852]: I0129 12:23:01.473002 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32686705-bf27-4ee5-9d96-39e17e9512a3" path="/var/lib/kubelet/pods/32686705-bf27-4ee5-9d96-39e17e9512a3/volumes" Jan 29 12:23:30 crc kubenswrapper[4852]: I0129 12:23:30.017800 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:23:30 crc kubenswrapper[4852]: I0129 12:23:30.018552 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.475759 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-679cc7fcbf-759j5"] Jan 29 12:23:35 crc kubenswrapper[4852]: E0129 12:23:35.476633 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="registry-server" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.476647 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="registry-server" Jan 29 12:23:35 crc kubenswrapper[4852]: E0129 12:23:35.476667 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="extract-content" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.476673 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="extract-content" Jan 
29 12:23:35 crc kubenswrapper[4852]: E0129 12:23:35.476700 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="extract-utilities" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.476706 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="extract-utilities" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.476887 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b720c39-ab50-49f9-a8c0-d3a3c5502c85" containerName="registry-server" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.478034 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.480438 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.480485 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.480451 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-l9k6v" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.480903 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.481402 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-679cc7fcbf-759j5"] Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.533558 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.533857 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-log" containerID="cri-o://477c38f3d0f87ca787dd0356a5c5555536cea472d2232f6237c4ec314f17d311" gracePeriod=30 Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.534389 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-httpd" containerID="cri-o://ee9925a0b1a59b2f18a02c9a41f2fc272ba5cc720235c90a7390316b7a640a19" gracePeriod=30 Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.574560 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6978b99b85-59x2d"] Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.584652 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.596428 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6978b99b85-59x2d"] Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.610845 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.611152 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-log" containerID="cri-o://818d5943059b1be294fd900bff7493265d9f751e2a7cbcfedddf3f01fd99164f" gracePeriod=30 Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.611328 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-httpd" containerID="cri-o://1a022c41f9a4dba7aa1af297af043a773c50fbd4f4b1128a006e75dc5a0cdd39" gracePeriod=30 Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.645997 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-scripts\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.646097 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt772\" (UniqueName: \"kubernetes.io/projected/4e0934f2-d5c3-456f-ae57-5872be063940-kube-api-access-qt772\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.646129 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e0934f2-d5c3-456f-ae57-5872be063940-horizon-secret-key\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.646453 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-config-data\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.646487 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e0934f2-d5c3-456f-ae57-5872be063940-logs\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.748639 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-config-data\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.748699 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e0934f2-d5c3-456f-ae57-5872be063940-logs\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.748762 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39274d66-6aa0-4214-830d-044100a544ca-logs\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.748798 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-scripts\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.748825 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39274d66-6aa0-4214-830d-044100a544ca-horizon-secret-key\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.749312 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e0934f2-d5c3-456f-ae57-5872be063940-logs\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.749562 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-scripts\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.749671 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt772\" (UniqueName: \"kubernetes.io/projected/4e0934f2-d5c3-456f-ae57-5872be063940-kube-api-access-qt772\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.750015 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e0934f2-d5c3-456f-ae57-5872be063940-horizon-secret-key\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.750081 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-scripts\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.750112 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-config-data\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.750171 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-config-data\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.750246 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd4h9\" (UniqueName: \"kubernetes.io/projected/39274d66-6aa0-4214-830d-044100a544ca-kube-api-access-fd4h9\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.756034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e0934f2-d5c3-456f-ae57-5872be063940-horizon-secret-key\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.778063 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt772\" (UniqueName: \"kubernetes.io/projected/4e0934f2-d5c3-456f-ae57-5872be063940-kube-api-access-qt772\") pod \"horizon-679cc7fcbf-759j5\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.810604 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.852035 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-scripts\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.852094 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-config-data\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.852194 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd4h9\" (UniqueName: \"kubernetes.io/projected/39274d66-6aa0-4214-830d-044100a544ca-kube-api-access-fd4h9\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.852284 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39274d66-6aa0-4214-830d-044100a544ca-logs\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.852321 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39274d66-6aa0-4214-830d-044100a544ca-horizon-secret-key\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.853744 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39274d66-6aa0-4214-830d-044100a544ca-logs\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.853824 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-scripts\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.854501 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-config-data\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.857060 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39274d66-6aa0-4214-830d-044100a544ca-horizon-secret-key\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.872337 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fd4h9\" (UniqueName: \"kubernetes.io/projected/39274d66-6aa0-4214-830d-044100a544ca-kube-api-access-fd4h9\") pod \"horizon-6978b99b85-59x2d\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:35 crc kubenswrapper[4852]: I0129 12:23:35.914469 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.194487 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-679cc7fcbf-759j5"] Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.199098 4852 generic.go:334] "Generic (PLEG): container finished" podID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerID="818d5943059b1be294fd900bff7493265d9f751e2a7cbcfedddf3f01fd99164f" exitCode=143 Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.199149 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7","Type":"ContainerDied","Data":"818d5943059b1be294fd900bff7493265d9f751e2a7cbcfedddf3f01fd99164f"} Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.200823 4852 generic.go:334] "Generic (PLEG): container finished" podID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerID="477c38f3d0f87ca787dd0356a5c5555536cea472d2232f6237c4ec314f17d311" exitCode=143 Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.200847 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4c7fc2f-94a0-4558-9e2b-f3578f70f273","Type":"ContainerDied","Data":"477c38f3d0f87ca787dd0356a5c5555536cea472d2232f6237c4ec314f17d311"} Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.233784 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85446f64c7-pbx75"] Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.235845 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.240751 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85446f64c7-pbx75"] Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.340972 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.341959 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-679cc7fcbf-759j5"] Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.377650 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbaa621d-6bac-4dbc-a5ea-fec780103dde-logs\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.377738 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbaa621d-6bac-4dbc-a5ea-fec780103dde-horizon-secret-key\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.377794 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-scripts\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.377842 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-config-data\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.377890 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rwm9\" (UniqueName: \"kubernetes.io/projected/dbaa621d-6bac-4dbc-a5ea-fec780103dde-kube-api-access-8rwm9\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: W0129 12:23:36.443780 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39274d66_6aa0_4214_830d_044100a544ca.slice/crio-e744fc61e0cc96ef5ca4ab62e7aeadadbe736c46d8a3f370fa327a65666eb220 WatchSource:0}: Error finding container e744fc61e0cc96ef5ca4ab62e7aeadadbe736c46d8a3f370fa327a65666eb220: Status 404 returned error can't find the container with id e744fc61e0cc96ef5ca4ab62e7aeadadbe736c46d8a3f370fa327a65666eb220 Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.449861 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6978b99b85-59x2d"] Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.479450 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbaa621d-6bac-4dbc-a5ea-fec780103dde-logs\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " 
pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.479524 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbaa621d-6bac-4dbc-a5ea-fec780103dde-horizon-secret-key\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.479558 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-scripts\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.479619 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-config-data\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.480527 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rwm9\" (UniqueName: \"kubernetes.io/projected/dbaa621d-6bac-4dbc-a5ea-fec780103dde-kube-api-access-8rwm9\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.481327 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-scripts\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.482195 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbaa621d-6bac-4dbc-a5ea-fec780103dde-logs\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.482830 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-config-data\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.485173 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbaa621d-6bac-4dbc-a5ea-fec780103dde-horizon-secret-key\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.499494 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rwm9\" (UniqueName: \"kubernetes.io/projected/dbaa621d-6bac-4dbc-a5ea-fec780103dde-kube-api-access-8rwm9\") pod \"horizon-85446f64c7-pbx75\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:36 crc kubenswrapper[4852]: I0129 12:23:36.609910 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:23:37 crc kubenswrapper[4852]: I0129 12:23:37.092957 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85446f64c7-pbx75"] Jan 29 12:23:37 crc kubenswrapper[4852]: I0129 12:23:37.222718 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6978b99b85-59x2d" event={"ID":"39274d66-6aa0-4214-830d-044100a544ca","Type":"ContainerStarted","Data":"e744fc61e0cc96ef5ca4ab62e7aeadadbe736c46d8a3f370fa327a65666eb220"} Jan 29 12:23:37 crc kubenswrapper[4852]: I0129 12:23:37.236463 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-679cc7fcbf-759j5" event={"ID":"4e0934f2-d5c3-456f-ae57-5872be063940","Type":"ContainerStarted","Data":"16440ce6c1c1fc5543b3359a3daf48aab927f15c47ad09d8c882432f29fb8ca3"} Jan 29 12:23:37 crc kubenswrapper[4852]: I0129 12:23:37.274562 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85446f64c7-pbx75" event={"ID":"dbaa621d-6bac-4dbc-a5ea-fec780103dde","Type":"ContainerStarted","Data":"d19824b404be85030fe6bfd4755eddbd591f570a05d12f26bb45d0f801d90857"} Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.298472 4852 generic.go:334] "Generic (PLEG): container finished" podID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerID="ee9925a0b1a59b2f18a02c9a41f2fc272ba5cc720235c90a7390316b7a640a19" exitCode=0 Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.298560 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4c7fc2f-94a0-4558-9e2b-f3578f70f273","Type":"ContainerDied","Data":"ee9925a0b1a59b2f18a02c9a41f2fc272ba5cc720235c90a7390316b7a640a19"} Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.298953 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4c7fc2f-94a0-4558-9e2b-f3578f70f273","Type":"ContainerDied","Data":"a2000d6e739e7b513717deb7cdaf2a6cd5f5ea7d8f8b5721a87612c15b78ff44"} Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.298973 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2000d6e739e7b513717deb7cdaf2a6cd5f5ea7d8f8b5721a87612c15b78ff44" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.301876 4852 generic.go:334] "Generic (PLEG): container finished" podID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerID="1a022c41f9a4dba7aa1af297af043a773c50fbd4f4b1128a006e75dc5a0cdd39" exitCode=0 Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.301928 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7","Type":"ContainerDied","Data":"1a022c41f9a4dba7aa1af297af043a773c50fbd4f4b1128a006e75dc5a0cdd39"} Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.331333 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.440511 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-httpd-run\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.441186 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-logs\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.441266 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-ceph\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.441455 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tgl2\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-kube-api-access-2tgl2\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.441534 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-scripts\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.441709 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-config-data\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.441813 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-combined-ca-bundle\") pod \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\" (UID: \"e4c7fc2f-94a0-4558-9e2b-f3578f70f273\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.442072 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.442436 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.448778 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-ceph" (OuterVolumeSpecName: "ceph") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.449005 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-kube-api-access-2tgl2" (OuterVolumeSpecName: "kube-api-access-2tgl2") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "kube-api-access-2tgl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.450850 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-scripts" (OuterVolumeSpecName: "scripts") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.452703 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.463714 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-logs" (OuterVolumeSpecName: "logs") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.476222 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.540157 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-config-data" (OuterVolumeSpecName: "config-data") pod "e4c7fc2f-94a0-4558-9e2b-f3578f70f273" (UID: "e4c7fc2f-94a0-4558-9e2b-f3578f70f273"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.543903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-config-data\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.543999 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-logs\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.544079 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-scripts\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.544099 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-ceph\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.544134 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcqnt\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-kube-api-access-xcqnt\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.544156 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-httpd-run\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.544251 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-combined-ca-bundle\") pod \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\" (UID: \"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7\") " Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.544934 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545481 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545503 4852 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545513 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545522 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545531 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tgl2\" (UniqueName: \"kubernetes.io/projected/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-kube-api-access-2tgl2\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545563 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.545573 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c7fc2f-94a0-4558-9e2b-f3578f70f273-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.546015 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-logs" (OuterVolumeSpecName: "logs") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.548678 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-scripts" (OuterVolumeSpecName: "scripts") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.548787 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-ceph" (OuterVolumeSpecName: "ceph") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.548768 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-kube-api-access-xcqnt" (OuterVolumeSpecName: "kube-api-access-xcqnt") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "kube-api-access-xcqnt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.576023 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.597319 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-config-data" (OuterVolumeSpecName: "config-data") pod "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" (UID: "8a3b7f34-0f40-4a83-93d2-5124b94eb7f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.647555 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcqnt\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-kube-api-access-xcqnt\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.647611 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.647626 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.647638 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.647651 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:39 crc kubenswrapper[4852]: I0129 12:23:39.647661 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.318162 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8a3b7f34-0f40-4a83-93d2-5124b94eb7f7","Type":"ContainerDied","Data":"49b03f24661ff317aa78cc254c4a58731713b236418e2759496c18e2b859f52f"} Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.318602 4852 scope.go:117] "RemoveContainer" containerID="1a022c41f9a4dba7aa1af297af043a773c50fbd4f4b1128a006e75dc5a0cdd39" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.318372 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.318410 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.365096 4852 scope.go:117] "RemoveContainer" containerID="818d5943059b1be294fd900bff7493265d9f751e2a7cbcfedddf3f01fd99164f" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.367665 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.388874 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.398789 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.413314 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.445301 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: E0129 12:23:40.446063 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-log" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446091 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-log" Jan 29 12:23:40 crc kubenswrapper[4852]: E0129 12:23:40.446137 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-log" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446149 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-log" Jan 29 12:23:40 crc kubenswrapper[4852]: E0129 12:23:40.446164 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-httpd" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446179 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-httpd" Jan 29 12:23:40 crc kubenswrapper[4852]: E0129 12:23:40.446218 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-httpd" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446230 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-httpd" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446519 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-httpd" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446542 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-log" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446564 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" containerName="glance-httpd" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.446625 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" containerName="glance-log" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.448149 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.450349 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-88bfk" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.450676 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.451307 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.465121 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.472323 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.478098 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.478318 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.486623 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.579568 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.579655 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04a9fabd-cf4f-4357-8490-c232eada6b3a-logs\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.579678 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6ztq\" (UniqueName: \"kubernetes.io/projected/212d90fc-8389-41db-b187-e76812e4d072-kube-api-access-h6ztq\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580135 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/212d90fc-8389-41db-b187-e76812e4d072-ceph\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580273 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580289 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/04a9fabd-cf4f-4357-8490-c232eada6b3a-ceph\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580538 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/04a9fabd-cf4f-4357-8490-c232eada6b3a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580604 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/212d90fc-8389-41db-b187-e76812e4d072-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580706 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpclp\" (UniqueName: \"kubernetes.io/projected/04a9fabd-cf4f-4357-8490-c232eada6b3a-kube-api-access-kpclp\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.580805 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-scripts\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.581070 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/212d90fc-8389-41db-b187-e76812e4d072-logs\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.581116 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-config-data\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.581149 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.581294 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: 
I0129 12:23:40.683423 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683499 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683566 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04a9fabd-cf4f-4357-8490-c232eada6b3a-logs\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683612 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6ztq\" (UniqueName: \"kubernetes.io/projected/212d90fc-8389-41db-b187-e76812e4d072-kube-api-access-h6ztq\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683672 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/212d90fc-8389-41db-b187-e76812e4d072-ceph\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683796 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683821 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/04a9fabd-cf4f-4357-8490-c232eada6b3a-ceph\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683885 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/04a9fabd-cf4f-4357-8490-c232eada6b3a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683911 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/212d90fc-8389-41db-b187-e76812e4d072-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.683973 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-kpclp\" (UniqueName: \"kubernetes.io/projected/04a9fabd-cf4f-4357-8490-c232eada6b3a-kube-api-access-kpclp\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684016 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-scripts\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684073 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04a9fabd-cf4f-4357-8490-c232eada6b3a-logs\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684104 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/212d90fc-8389-41db-b187-e76812e4d072-logs\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684141 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-config-data\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684194 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684332 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/04a9fabd-cf4f-4357-8490-c232eada6b3a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.684479 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/212d90fc-8389-41db-b187-e76812e4d072-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.685008 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/212d90fc-8389-41db-b187-e76812e4d072-logs\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.689687 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/04a9fabd-cf4f-4357-8490-c232eada6b3a-ceph\") pod \"glance-default-internal-api-0\" (UID: 
\"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.690066 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.694017 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.695360 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-scripts\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.700705 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/212d90fc-8389-41db-b187-e76812e4d072-config-data\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.702016 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/212d90fc-8389-41db-b187-e76812e4d072-ceph\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.702287 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.702714 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04a9fabd-cf4f-4357-8490-c232eada6b3a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.710394 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpclp\" (UniqueName: \"kubernetes.io/projected/04a9fabd-cf4f-4357-8490-c232eada6b3a-kube-api-access-kpclp\") pod \"glance-default-internal-api-0\" (UID: \"04a9fabd-cf4f-4357-8490-c232eada6b3a\") " pod="openstack/glance-default-internal-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.715056 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6ztq\" (UniqueName: \"kubernetes.io/projected/212d90fc-8389-41db-b187-e76812e4d072-kube-api-access-h6ztq\") pod \"glance-default-external-api-0\" (UID: \"212d90fc-8389-41db-b187-e76812e4d072\") " pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 
12:23:40.776039 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 12:23:40 crc kubenswrapper[4852]: I0129 12:23:40.798442 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 12:23:41 crc kubenswrapper[4852]: I0129 12:23:41.484251 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a3b7f34-0f40-4a83-93d2-5124b94eb7f7" path="/var/lib/kubelet/pods/8a3b7f34-0f40-4a83-93d2-5124b94eb7f7/volumes" Jan 29 12:23:41 crc kubenswrapper[4852]: I0129 12:23:41.485504 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4c7fc2f-94a0-4558-9e2b-f3578f70f273" path="/var/lib/kubelet/pods/e4c7fc2f-94a0-4558-9e2b-f3578f70f273/volumes" Jan 29 12:23:42 crc kubenswrapper[4852]: I0129 12:23:42.041767 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-h4dh6"] Jan 29 12:23:42 crc kubenswrapper[4852]: I0129 12:23:42.052743 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-1259-account-create-update-5d7vf"] Jan 29 12:23:42 crc kubenswrapper[4852]: I0129 12:23:42.062572 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-1259-account-create-update-5d7vf"] Jan 29 12:23:42 crc kubenswrapper[4852]: I0129 12:23:42.073011 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-h4dh6"] Jan 29 12:23:43 crc kubenswrapper[4852]: I0129 12:23:43.483997 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd267b59-5fce-4fe4-850f-d86ca19bc1e6" path="/var/lib/kubelet/pods/bd267b59-5fce-4fe4-850f-d86ca19bc1e6/volumes" Jan 29 12:23:43 crc kubenswrapper[4852]: I0129 12:23:43.486106 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d14a7f30-2ff8-4ea0-b52d-f345571b2d79" path="/var/lib/kubelet/pods/d14a7f30-2ff8-4ea0-b52d-f345571b2d79/volumes" Jan 29 12:23:47 crc kubenswrapper[4852]: I0129 12:23:47.078134 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 12:23:47 crc kubenswrapper[4852]: I0129 12:23:47.300629 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 12:23:51 crc kubenswrapper[4852]: I0129 12:23:51.042107 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-mtgj8"] Jan 29 12:23:51 crc kubenswrapper[4852]: I0129 12:23:51.056043 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-mtgj8"] Jan 29 12:23:51 crc kubenswrapper[4852]: I0129 12:23:51.485859 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54b7c49e-9d11-4519-8721-9838904b77db" path="/var/lib/kubelet/pods/54b7c49e-9d11-4519-8721-9838904b77db/volumes" Jan 29 12:23:51 crc kubenswrapper[4852]: E0129 12:23:51.624019 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 29 12:23:51 crc kubenswrapper[4852]: E0129 12:23:51.624325 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n89h584hddh678h5b5h565h77h574h76h57ch648h5fdh595h5f5h5c6hb9h56h555h5dbh547h9dhbh55fhcbh594hc6h594h67bh565h5d8hd9h5c9q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fd4h9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-6978b99b85-59x2d_openstack(39274d66-6aa0-4214-830d-044100a544ca): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 12:23:52 crc kubenswrapper[4852]: E0129 12:23:52.671523 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 29 12:23:52 crc kubenswrapper[4852]: E0129 12:23:52.672074 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f7h57fh54bhb8h5d4h596h5b6h65ch59dh87h5dch566hd4h54dh546h586hf7h647h78h684h6ch5ffh697h694h5dch5d7h5dch5cfh5ch67fh54dh548q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qt772,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-679cc7fcbf-759j5_openstack(4e0934f2-d5c3-456f-ae57-5872be063940): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 12:23:52 crc kubenswrapper[4852]: I0129 12:23:52.761150 4852 scope.go:117] "RemoveContainer" containerID="e968797c0c4e727e777729bc0e355968cd2222a97910fb455bb9d034f94731a0" Jan 29 12:23:53 crc kubenswrapper[4852]: E0129 12:23:53.189899 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" Jan 29 12:23:53 crc kubenswrapper[4852]: E0129 12:23:53.190182 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-679cc7fcbf-759j5" podUID="4e0934f2-d5c3-456f-ae57-5872be063940" Jan 29 12:23:53 crc kubenswrapper[4852]: W0129 12:23:53.205198 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04a9fabd_cf4f_4357_8490_c232eada6b3a.slice/crio-ca12b9942b4ad96c5c8fecb743d72ce2625655126309d2a7ef8a62994f4befa9 WatchSource:0}: Error finding container 
ca12b9942b4ad96c5c8fecb743d72ce2625655126309d2a7ef8a62994f4befa9: Status 404 returned error can't find the container with id ca12b9942b4ad96c5c8fecb743d72ce2625655126309d2a7ef8a62994f4befa9 Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.415455 4852 scope.go:117] "RemoveContainer" containerID="03168d6d9f0d6ba525ce6668927c5a310220c0f48476b640626c13fbcc2080ae" Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.502070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"04a9fabd-cf4f-4357-8490-c232eada6b3a","Type":"ContainerStarted","Data":"ca12b9942b4ad96c5c8fecb743d72ce2625655126309d2a7ef8a62994f4befa9"} Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.505041 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"212d90fc-8389-41db-b187-e76812e4d072","Type":"ContainerStarted","Data":"d057850201b1fab2e2b113a2f22b57dfbc2a31a943795df12c015438df5be0f3"} Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.514401 4852 scope.go:117] "RemoveContainer" containerID="477c38f3d0f87ca787dd0356a5c5555536cea472d2232f6237c4ec314f17d311" Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.718834 4852 scope.go:117] "RemoveContainer" containerID="4df0cd6423b578dfa032b8bbf476910d03cf16e621f8ddbb5016711471c56149" Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.794209 4852 scope.go:117] "RemoveContainer" containerID="4f6e10e7616a7b8147899e25afb944346027559244b495315c5d5efbf38532ee" Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.912856 4852 scope.go:117] "RemoveContainer" containerID="1364c4d996e4acc7b384a9241c5a041f99c2d44d11d9db2d301449127609f632" Jan 29 12:23:53 crc kubenswrapper[4852]: I0129 12:23:53.925457 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.086867 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-scripts\") pod \"4e0934f2-d5c3-456f-ae57-5872be063940\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.087081 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-config-data\") pod \"4e0934f2-d5c3-456f-ae57-5872be063940\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.087173 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e0934f2-d5c3-456f-ae57-5872be063940-horizon-secret-key\") pod \"4e0934f2-d5c3-456f-ae57-5872be063940\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.087334 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt772\" (UniqueName: \"kubernetes.io/projected/4e0934f2-d5c3-456f-ae57-5872be063940-kube-api-access-qt772\") pod \"4e0934f2-d5c3-456f-ae57-5872be063940\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.087383 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e0934f2-d5c3-456f-ae57-5872be063940-logs\") pod \"4e0934f2-d5c3-456f-ae57-5872be063940\" (UID: \"4e0934f2-d5c3-456f-ae57-5872be063940\") " Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.087700 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-scripts" (OuterVolumeSpecName: "scripts") pod "4e0934f2-d5c3-456f-ae57-5872be063940" (UID: "4e0934f2-d5c3-456f-ae57-5872be063940"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.087855 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e0934f2-d5c3-456f-ae57-5872be063940-logs" (OuterVolumeSpecName: "logs") pod "4e0934f2-d5c3-456f-ae57-5872be063940" (UID: "4e0934f2-d5c3-456f-ae57-5872be063940"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.088047 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e0934f2-d5c3-456f-ae57-5872be063940-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.088064 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.089047 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-config-data" (OuterVolumeSpecName: "config-data") pod "4e0934f2-d5c3-456f-ae57-5872be063940" (UID: "4e0934f2-d5c3-456f-ae57-5872be063940"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.093682 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e0934f2-d5c3-456f-ae57-5872be063940-kube-api-access-qt772" (OuterVolumeSpecName: "kube-api-access-qt772") pod "4e0934f2-d5c3-456f-ae57-5872be063940" (UID: "4e0934f2-d5c3-456f-ae57-5872be063940"). InnerVolumeSpecName "kube-api-access-qt772". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.093693 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e0934f2-d5c3-456f-ae57-5872be063940-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4e0934f2-d5c3-456f-ae57-5872be063940" (UID: "4e0934f2-d5c3-456f-ae57-5872be063940"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.190748 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e0934f2-d5c3-456f-ae57-5872be063940-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.190820 4852 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4e0934f2-d5c3-456f-ae57-5872be063940-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.190850 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt772\" (UniqueName: \"kubernetes.io/projected/4e0934f2-d5c3-456f-ae57-5872be063940-kube-api-access-qt772\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.477563 4852 scope.go:117] "RemoveContainer" containerID="ee9925a0b1a59b2f18a02c9a41f2fc272ba5cc720235c90a7390316b7a640a19" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.525233 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-679cc7fcbf-759j5" event={"ID":"4e0934f2-d5c3-456f-ae57-5872be063940","Type":"ContainerDied","Data":"16440ce6c1c1fc5543b3359a3daf48aab927f15c47ad09d8c882432f29fb8ca3"} Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.525268 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-679cc7fcbf-759j5" Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.608714 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-679cc7fcbf-759j5"] Jan 29 12:23:54 crc kubenswrapper[4852]: I0129 12:23:54.623325 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-679cc7fcbf-759j5"] Jan 29 12:23:55 crc kubenswrapper[4852]: I0129 12:23:55.002299 4852 scope.go:117] "RemoveContainer" containerID="714fe3408bbf0103f8209f6aabe1724085938e274b10943b38023ae629939271" Jan 29 12:23:55 crc kubenswrapper[4852]: I0129 12:23:55.478820 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e0934f2-d5c3-456f-ae57-5872be063940" path="/var/lib/kubelet/pods/4e0934f2-d5c3-456f-ae57-5872be063940/volumes" Jan 29 12:23:55 crc kubenswrapper[4852]: I0129 12:23:55.550161 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85446f64c7-pbx75" event={"ID":"dbaa621d-6bac-4dbc-a5ea-fec780103dde","Type":"ContainerStarted","Data":"a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf"} Jan 29 12:23:55 crc kubenswrapper[4852]: I0129 12:23:55.552116 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"212d90fc-8389-41db-b187-e76812e4d072","Type":"ContainerStarted","Data":"8e07d4a73ca8fe04f1b24af22c1653bd2201f4e52a5519ed3797c611982a43a9"} Jan 29 12:23:55 crc kubenswrapper[4852]: I0129 12:23:55.554095 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6978b99b85-59x2d" event={"ID":"39274d66-6aa0-4214-830d-044100a544ca","Type":"ContainerStarted","Data":"6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c"} Jan 29 12:23:56 crc kubenswrapper[4852]: I0129 12:23:56.570923 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"04a9fabd-cf4f-4357-8490-c232eada6b3a","Type":"ContainerStarted","Data":"91ccbdb0cd9df7e7ceaf5abd963def145d73c9630037f8591afd742381ac21e0"} Jan 29 12:23:57 crc kubenswrapper[4852]: I0129 12:23:57.580611 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85446f64c7-pbx75" event={"ID":"dbaa621d-6bac-4dbc-a5ea-fec780103dde","Type":"ContainerStarted","Data":"42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd"} Jan 29 12:23:58 crc kubenswrapper[4852]: I0129 12:23:58.593285 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6978b99b85-59x2d" event={"ID":"39274d66-6aa0-4214-830d-044100a544ca","Type":"ContainerStarted","Data":"b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7"} Jan 29 12:23:59 crc kubenswrapper[4852]: I0129 12:23:59.612168 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"212d90fc-8389-41db-b187-e76812e4d072","Type":"ContainerStarted","Data":"1d0ea21ba999aca16c2b5dece25deededc3669a3f9771c3f8ef3861f0e5d77a1"} Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.017819 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.017942 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.624760 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"04a9fabd-cf4f-4357-8490-c232eada6b3a","Type":"ContainerStarted","Data":"d5c56a5818dc457a6812a2b032936c333a8575060f22deb423baaaf554b46b72"} Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.653655 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=20.653636915 podStartE2EDuration="20.653636915s" podCreationTimestamp="2026-01-29 12:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:24:00.651996446 +0000 UTC m=+6137.869327610" watchObservedRunningTime="2026-01-29 12:24:00.653636915 +0000 UTC m=+6137.870968049" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.681375 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=20.681336001 podStartE2EDuration="20.681336001s" podCreationTimestamp="2026-01-29 12:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:24:00.677710242 +0000 UTC m=+6137.895041406" watchObservedRunningTime="2026-01-29 12:24:00.681336001 +0000 UTC m=+6137.898667145" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.706617 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-85446f64c7-pbx75" podStartSLOduration=8.412259079 podStartE2EDuration="24.706597947s" podCreationTimestamp="2026-01-29 12:23:36 +0000 UTC" firstStartedPulling="2026-01-29 12:23:37.123944498 +0000 UTC m=+6114.341275642" lastFinishedPulling="2026-01-29 12:23:53.418283366 +0000 UTC m=+6130.635614510" observedRunningTime="2026-01-29 12:24:00.698704154 +0000 UTC m=+6137.916035308" watchObservedRunningTime="2026-01-29 12:24:00.706597947 +0000 UTC m=+6137.923929081" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.732528 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6978b99b85-59x2d" podStartSLOduration=-9223372011.122269 podStartE2EDuration="25.732507428s" podCreationTimestamp="2026-01-29 12:23:35 +0000 UTC" firstStartedPulling="2026-01-29 12:23:36.447099882 +0000 UTC m=+6113.664431016" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:24:00.722374012 +0000 UTC m=+6137.939705156" watchObservedRunningTime="2026-01-29 12:24:00.732507428 +0000 UTC m=+6137.949838572" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.776817 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.777182 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.799761 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.799821 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.816459 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.827474 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.850713 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:00 crc kubenswrapper[4852]: I0129 12:24:00.852383 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:01 crc kubenswrapper[4852]: I0129 12:24:01.634050 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:01 crc kubenswrapper[4852]: I0129 12:24:01.634124 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 12:24:01 crc kubenswrapper[4852]: I0129 12:24:01.634139 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:01 crc kubenswrapper[4852]: I0129 12:24:01.634151 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 12:24:05 crc kubenswrapper[4852]: I0129 12:24:05.915623 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:24:05 crc kubenswrapper[4852]: I0129 12:24:05.916093 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:24:06 crc kubenswrapper[4852]: I0129 12:24:06.024507 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:06 crc kubenswrapper[4852]: I0129 12:24:06.158160 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 12:24:06 crc kubenswrapper[4852]: I0129 12:24:06.167010 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 12:24:06 crc kubenswrapper[4852]: I0129 12:24:06.610841 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:24:06 crc kubenswrapper[4852]: I0129 12:24:06.611178 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:24:08 crc kubenswrapper[4852]: I0129 12:24:08.088339 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 12:24:15 crc kubenswrapper[4852]: I0129 12:24:15.918808 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused" Jan 29 12:24:16 crc kubenswrapper[4852]: I0129 12:24:16.611745 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85446f64c7-pbx75" 
podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.111:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.111:8080: connect: connection refused" Jan 29 12:24:21 crc kubenswrapper[4852]: I0129 12:24:21.046447 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f77b-account-create-update-rjxjs"] Jan 29 12:24:21 crc kubenswrapper[4852]: I0129 12:24:21.057833 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-5g48v"] Jan 29 12:24:21 crc kubenswrapper[4852]: I0129 12:24:21.067699 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f77b-account-create-update-rjxjs"] Jan 29 12:24:21 crc kubenswrapper[4852]: I0129 12:24:21.080302 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-5g48v"] Jan 29 12:24:21 crc kubenswrapper[4852]: I0129 12:24:21.476034 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06f552c0-01ac-4362-9493-19f42cbfcbee" path="/var/lib/kubelet/pods/06f552c0-01ac-4362-9493-19f42cbfcbee/volumes" Jan 29 12:24:21 crc kubenswrapper[4852]: I0129 12:24:21.477874 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03" path="/var/lib/kubelet/pods/d0f22b7d-83b7-4d5c-b56b-98a1d4edfa03/volumes" Jan 29 12:24:25 crc kubenswrapper[4852]: I0129 12:24:25.918283 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused" Jan 29 12:24:26 crc kubenswrapper[4852]: I0129 12:24:26.611439 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-85446f64c7-pbx75" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.111:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.111:8080: connect: connection refused" Jan 29 12:24:28 crc kubenswrapper[4852]: I0129 12:24:28.037683 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-ghr6b"] Jan 29 12:24:28 crc kubenswrapper[4852]: I0129 12:24:28.050988 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-ghr6b"] Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.317429 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dn6d8"] Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.328025 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.347834 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dn6d8"] Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.453022 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-utilities\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.453216 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-catalog-content\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.453510 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gksfk\" (UniqueName: \"kubernetes.io/projected/a792ce1c-1dfe-479b-a9e8-a96a861672a0-kube-api-access-gksfk\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.475241 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a3ec239-c977-4064-9544-1075adddf3d1" path="/var/lib/kubelet/pods/7a3ec239-c977-4064-9544-1075adddf3d1/volumes" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.556073 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gksfk\" (UniqueName: \"kubernetes.io/projected/a792ce1c-1dfe-479b-a9e8-a96a861672a0-kube-api-access-gksfk\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.556226 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-utilities\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.556316 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-catalog-content\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.557388 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-utilities\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.557654 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-catalog-content\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.579235 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gksfk\" (UniqueName: \"kubernetes.io/projected/a792ce1c-1dfe-479b-a9e8-a96a861672a0-kube-api-access-gksfk\") pod \"community-operators-dn6d8\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:29 crc kubenswrapper[4852]: I0129 12:24:29.645868 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.017230 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.017527 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.017576 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.018370 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e89b72b5258ba940c66e1c527c76cdcb417906e73db974c3fcb99b3d86470719"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.018429 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://e89b72b5258ba940c66e1c527c76cdcb417906e73db974c3fcb99b3d86470719" gracePeriod=600 Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.243637 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dn6d8"] Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.961955 4852 generic.go:334] "Generic (PLEG): container finished" podID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerID="bbd211bd84e745f77f4b74d6f1517684361226608b65c436d74c5a7d7e15bc26" exitCode=0 Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.962247 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dn6d8" event={"ID":"a792ce1c-1dfe-479b-a9e8-a96a861672a0","Type":"ContainerDied","Data":"bbd211bd84e745f77f4b74d6f1517684361226608b65c436d74c5a7d7e15bc26"} Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.962273 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dn6d8" 
event={"ID":"a792ce1c-1dfe-479b-a9e8-a96a861672a0","Type":"ContainerStarted","Data":"d913acb8defcae6c80abc4871d9228f023df68eaef67d99358bcb022a4d13eee"} Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.966019 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="e89b72b5258ba940c66e1c527c76cdcb417906e73db974c3fcb99b3d86470719" exitCode=0 Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.966413 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"e89b72b5258ba940c66e1c527c76cdcb417906e73db974c3fcb99b3d86470719"} Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.966602 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751"} Jan 29 12:24:30 crc kubenswrapper[4852]: I0129 12:24:30.966628 4852 scope.go:117] "RemoveContainer" containerID="9860937fd4de3b15121f040e8bbc76a02038f5949a6ad9d4ca64ecdaa2b7315a" Jan 29 12:24:35 crc kubenswrapper[4852]: I0129 12:24:35.014126 4852 generic.go:334] "Generic (PLEG): container finished" podID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerID="d6f25dbc785d484eae0c525f545ccf4d50d192fbd5cf8c77c95201cd02c4d10b" exitCode=0 Jan 29 12:24:35 crc kubenswrapper[4852]: I0129 12:24:35.014253 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dn6d8" event={"ID":"a792ce1c-1dfe-479b-a9e8-a96a861672a0","Type":"ContainerDied","Data":"d6f25dbc785d484eae0c525f545ccf4d50d192fbd5cf8c77c95201cd02c4d10b"} Jan 29 12:24:38 crc kubenswrapper[4852]: I0129 12:24:38.114910 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:24:38 crc kubenswrapper[4852]: I0129 12:24:38.824182 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:24:39 crc kubenswrapper[4852]: I0129 12:24:39.762088 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:24:40 crc kubenswrapper[4852]: I0129 12:24:40.777567 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:24:40 crc kubenswrapper[4852]: I0129 12:24:40.862882 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6978b99b85-59x2d"] Jan 29 12:24:40 crc kubenswrapper[4852]: I0129 12:24:40.863077 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon-log" containerID="cri-o://6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c" gracePeriod=30 Jan 29 12:24:40 crc kubenswrapper[4852]: I0129 12:24:40.863202 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" containerID="cri-o://b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7" gracePeriod=30 Jan 29 12:24:45 crc kubenswrapper[4852]: I0129 12:24:45.120674 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/horizon-6978b99b85-59x2d" event={"ID":"39274d66-6aa0-4214-830d-044100a544ca","Type":"ContainerDied","Data":"b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7"} Jan 29 12:24:45 crc kubenswrapper[4852]: I0129 12:24:45.120664 4852 generic.go:334] "Generic (PLEG): container finished" podID="39274d66-6aa0-4214-830d-044100a544ca" containerID="b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7" exitCode=0 Jan 29 12:24:45 crc kubenswrapper[4852]: I0129 12:24:45.916497 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused" Jan 29 12:24:46 crc kubenswrapper[4852]: I0129 12:24:46.138108 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dn6d8" event={"ID":"a792ce1c-1dfe-479b-a9e8-a96a861672a0","Type":"ContainerStarted","Data":"852ccfdb32c85ceae5cc14c2b2eb4ee5fd8d4c8ef5dd314ebfe6eeafe5fa6723"} Jan 29 12:24:46 crc kubenswrapper[4852]: I0129 12:24:46.175673 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dn6d8" podStartSLOduration=2.83851691 podStartE2EDuration="17.17565491s" podCreationTimestamp="2026-01-29 12:24:29 +0000 UTC" firstStartedPulling="2026-01-29 12:24:30.963940533 +0000 UTC m=+6168.181271667" lastFinishedPulling="2026-01-29 12:24:45.301078533 +0000 UTC m=+6182.518409667" observedRunningTime="2026-01-29 12:24:46.1719229 +0000 UTC m=+6183.389254044" watchObservedRunningTime="2026-01-29 12:24:46.17565491 +0000 UTC m=+6183.392986064" Jan 29 12:24:49 crc kubenswrapper[4852]: I0129 12:24:49.646439 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:49 crc kubenswrapper[4852]: I0129 12:24:49.647068 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:49 crc kubenswrapper[4852]: I0129 12:24:49.718387 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.295609 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.339157 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-545cbcccb9-rs75k"] Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.341130 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.354364 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-545cbcccb9-rs75k"] Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.365638 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dn6d8"] Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.440397 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-scripts\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.440479 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-horizon-secret-key\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.440610 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-logs\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.440743 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2cqm\" (UniqueName: \"kubernetes.io/projected/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-kube-api-access-m2cqm\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.441090 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-config-data\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.543419 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-scripts\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.543499 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-horizon-secret-key\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.543529 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-logs\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.543569 
4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2cqm\" (UniqueName: \"kubernetes.io/projected/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-kube-api-access-m2cqm\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.543683 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-config-data\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.544153 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-logs\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.544355 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-scripts\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.544881 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-config-data\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.562834 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-horizon-secret-key\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.570205 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2cqm\" (UniqueName: \"kubernetes.io/projected/1552da5b-1c75-4b36-9e71-ea8cd1c9af06-kube-api-access-m2cqm\") pod \"horizon-545cbcccb9-rs75k\" (UID: \"1552da5b-1c75-4b36-9e71-ea8cd1c9af06\") " pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:50 crc kubenswrapper[4852]: I0129 12:24:50.675152 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.151071 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-545cbcccb9-rs75k"] Jan 29 12:24:51 crc kubenswrapper[4852]: W0129 12:24:51.158439 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1552da5b_1c75_4b36_9e71_ea8cd1c9af06.slice/crio-1e97820c6a2a929e81bac736bd3a2987790ba7c320ca4c44d9a8ab2bd4b2bd4e WatchSource:0}: Error finding container 1e97820c6a2a929e81bac736bd3a2987790ba7c320ca4c44d9a8ab2bd4b2bd4e: Status 404 returned error can't find the container with id 1e97820c6a2a929e81bac736bd3a2987790ba7c320ca4c44d9a8ab2bd4b2bd4e Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.189068 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545cbcccb9-rs75k" event={"ID":"1552da5b-1c75-4b36-9e71-ea8cd1c9af06","Type":"ContainerStarted","Data":"1e97820c6a2a929e81bac736bd3a2987790ba7c320ca4c44d9a8ab2bd4b2bd4e"} Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.737705 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-ckqhp"] Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.741047 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.760006 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-ckqhp"] Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.828317 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-1039-account-create-update-887rr"] Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.829974 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.832727 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.854833 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-1039-account-create-update-887rr"] Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.872225 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdmn4\" (UniqueName: \"kubernetes.io/projected/c48eb14d-cbad-4154-a588-7271c69721af-kube-api-access-hdmn4\") pod \"heat-db-create-ckqhp\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.872315 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c48eb14d-cbad-4154-a588-7271c69721af-operator-scripts\") pod \"heat-db-create-ckqhp\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.973795 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kmbn\" (UniqueName: \"kubernetes.io/projected/d8d97fec-53d7-4bb5-95ee-665b47fb821e-kube-api-access-5kmbn\") pod \"heat-1039-account-create-update-887rr\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.973869 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdmn4\" (UniqueName: \"kubernetes.io/projected/c48eb14d-cbad-4154-a588-7271c69721af-kube-api-access-hdmn4\") pod \"heat-db-create-ckqhp\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.973927 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c48eb14d-cbad-4154-a588-7271c69721af-operator-scripts\") pod \"heat-db-create-ckqhp\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.973985 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8d97fec-53d7-4bb5-95ee-665b47fb821e-operator-scripts\") pod \"heat-1039-account-create-update-887rr\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.974842 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c48eb14d-cbad-4154-a588-7271c69721af-operator-scripts\") pod \"heat-db-create-ckqhp\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:51 crc kubenswrapper[4852]: I0129 12:24:51.990516 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdmn4\" (UniqueName: \"kubernetes.io/projected/c48eb14d-cbad-4154-a588-7271c69721af-kube-api-access-hdmn4\") pod \"heat-db-create-ckqhp\" (UID: 
\"c48eb14d-cbad-4154-a588-7271c69721af\") " pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.079237 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8d97fec-53d7-4bb5-95ee-665b47fb821e-operator-scripts\") pod \"heat-1039-account-create-update-887rr\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.079549 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.079551 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kmbn\" (UniqueName: \"kubernetes.io/projected/d8d97fec-53d7-4bb5-95ee-665b47fb821e-kube-api-access-5kmbn\") pod \"heat-1039-account-create-update-887rr\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.080641 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8d97fec-53d7-4bb5-95ee-665b47fb821e-operator-scripts\") pod \"heat-1039-account-create-update-887rr\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.102169 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kmbn\" (UniqueName: \"kubernetes.io/projected/d8d97fec-53d7-4bb5-95ee-665b47fb821e-kube-api-access-5kmbn\") pod \"heat-1039-account-create-update-887rr\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.152855 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.203146 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dn6d8" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="registry-server" containerID="cri-o://852ccfdb32c85ceae5cc14c2b2eb4ee5fd8d4c8ef5dd314ebfe6eeafe5fa6723" gracePeriod=2 Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.204349 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545cbcccb9-rs75k" event={"ID":"1552da5b-1c75-4b36-9e71-ea8cd1c9af06","Type":"ContainerStarted","Data":"49f5d38af921881b1340118b350f08773384d6bd1b3cdd8ea7ac6d337777d713"} Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.204381 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545cbcccb9-rs75k" event={"ID":"1552da5b-1c75-4b36-9e71-ea8cd1c9af06","Type":"ContainerStarted","Data":"f54779ff5485ef46d818112861e6a61a7ded08b72512a527244386874ddea639"} Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.236897 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-545cbcccb9-rs75k" podStartSLOduration=2.236881017 podStartE2EDuration="2.236881017s" podCreationTimestamp="2026-01-29 12:24:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:24:52.231315481 +0000 UTC m=+6189.448646605" watchObservedRunningTime="2026-01-29 12:24:52.236881017 +0000 UTC m=+6189.454212151" Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.602152 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-ckqhp"] Jan 29 12:24:52 crc kubenswrapper[4852]: I0129 12:24:52.698292 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-1039-account-create-update-887rr"] Jan 29 12:24:52 crc kubenswrapper[4852]: W0129 12:24:52.710828 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8d97fec_53d7_4bb5_95ee_665b47fb821e.slice/crio-6d0f9553034ff9e16b774788f6b6a340757529b4a857637f19e32975549665f1 WatchSource:0}: Error finding container 6d0f9553034ff9e16b774788f6b6a340757529b4a857637f19e32975549665f1: Status 404 returned error can't find the container with id 6d0f9553034ff9e16b774788f6b6a340757529b4a857637f19e32975549665f1 Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.215575 4852 generic.go:334] "Generic (PLEG): container finished" podID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerID="852ccfdb32c85ceae5cc14c2b2eb4ee5fd8d4c8ef5dd314ebfe6eeafe5fa6723" exitCode=0 Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.215685 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dn6d8" event={"ID":"a792ce1c-1dfe-479b-a9e8-a96a861672a0","Type":"ContainerDied","Data":"852ccfdb32c85ceae5cc14c2b2eb4ee5fd8d4c8ef5dd314ebfe6eeafe5fa6723"} Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.217626 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-ckqhp" event={"ID":"c48eb14d-cbad-4154-a588-7271c69721af","Type":"ContainerStarted","Data":"85ce1cec0657b57ab134b72efe8d35272e18575cfc625fc0e1811ce901743956"} Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.217916 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-ckqhp" 
event={"ID":"c48eb14d-cbad-4154-a588-7271c69721af","Type":"ContainerStarted","Data":"d0beff7cc6fb693708590b1e65f67f6f5c5ac01945439fccc001d73f1014c42b"} Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.223464 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-1039-account-create-update-887rr" event={"ID":"d8d97fec-53d7-4bb5-95ee-665b47fb821e","Type":"ContainerStarted","Data":"ea85d8a31b5145744c386a68641cbbd734d18fa7b09c82f25f388f12094d81bb"} Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.223511 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-1039-account-create-update-887rr" event={"ID":"d8d97fec-53d7-4bb5-95ee-665b47fb821e","Type":"ContainerStarted","Data":"6d0f9553034ff9e16b774788f6b6a340757529b4a857637f19e32975549665f1"} Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.244137 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-ckqhp" podStartSLOduration=2.24411919 podStartE2EDuration="2.24411919s" podCreationTimestamp="2026-01-29 12:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:24:53.242570102 +0000 UTC m=+6190.459901236" watchObservedRunningTime="2026-01-29 12:24:53.24411919 +0000 UTC m=+6190.461450314" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.259960 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-1039-account-create-update-887rr" podStartSLOduration=2.259945146 podStartE2EDuration="2.259945146s" podCreationTimestamp="2026-01-29 12:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:24:53.255228471 +0000 UTC m=+6190.472559605" watchObservedRunningTime="2026-01-29 12:24:53.259945146 +0000 UTC m=+6190.477276280" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.272725 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.446395 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-utilities\") pod \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.446479 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gksfk\" (UniqueName: \"kubernetes.io/projected/a792ce1c-1dfe-479b-a9e8-a96a861672a0-kube-api-access-gksfk\") pod \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.446607 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-catalog-content\") pod \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\" (UID: \"a792ce1c-1dfe-479b-a9e8-a96a861672a0\") " Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.448821 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-utilities" (OuterVolumeSpecName: "utilities") pod "a792ce1c-1dfe-479b-a9e8-a96a861672a0" (UID: "a792ce1c-1dfe-479b-a9e8-a96a861672a0"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.456814 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a792ce1c-1dfe-479b-a9e8-a96a861672a0-kube-api-access-gksfk" (OuterVolumeSpecName: "kube-api-access-gksfk") pod "a792ce1c-1dfe-479b-a9e8-a96a861672a0" (UID: "a792ce1c-1dfe-479b-a9e8-a96a861672a0"). InnerVolumeSpecName "kube-api-access-gksfk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.505454 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a792ce1c-1dfe-479b-a9e8-a96a861672a0" (UID: "a792ce1c-1dfe-479b-a9e8-a96a861672a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.549470 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.549512 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gksfk\" (UniqueName: \"kubernetes.io/projected/a792ce1c-1dfe-479b-a9e8-a96a861672a0-kube-api-access-gksfk\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:53 crc kubenswrapper[4852]: I0129 12:24:53.549526 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a792ce1c-1dfe-479b-a9e8-a96a861672a0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.237806 4852 generic.go:334] "Generic (PLEG): container finished" podID="d8d97fec-53d7-4bb5-95ee-665b47fb821e" containerID="ea85d8a31b5145744c386a68641cbbd734d18fa7b09c82f25f388f12094d81bb" exitCode=0 Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.238273 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-1039-account-create-update-887rr" event={"ID":"d8d97fec-53d7-4bb5-95ee-665b47fb821e","Type":"ContainerDied","Data":"ea85d8a31b5145744c386a68641cbbd734d18fa7b09c82f25f388f12094d81bb"} Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.248476 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dn6d8" event={"ID":"a792ce1c-1dfe-479b-a9e8-a96a861672a0","Type":"ContainerDied","Data":"d913acb8defcae6c80abc4871d9228f023df68eaef67d99358bcb022a4d13eee"} Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.248522 4852 scope.go:117] "RemoveContainer" containerID="852ccfdb32c85ceae5cc14c2b2eb4ee5fd8d4c8ef5dd314ebfe6eeafe5fa6723" Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.248633 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dn6d8" Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.257878 4852 generic.go:334] "Generic (PLEG): container finished" podID="c48eb14d-cbad-4154-a588-7271c69721af" containerID="85ce1cec0657b57ab134b72efe8d35272e18575cfc625fc0e1811ce901743956" exitCode=0 Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.258158 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-ckqhp" event={"ID":"c48eb14d-cbad-4154-a588-7271c69721af","Type":"ContainerDied","Data":"85ce1cec0657b57ab134b72efe8d35272e18575cfc625fc0e1811ce901743956"} Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.301049 4852 scope.go:117] "RemoveContainer" containerID="d6f25dbc785d484eae0c525f545ccf4d50d192fbd5cf8c77c95201cd02c4d10b" Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.309011 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dn6d8"] Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.318443 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dn6d8"] Jan 29 12:24:54 crc kubenswrapper[4852]: I0129 12:24:54.328775 4852 scope.go:117] "RemoveContainer" containerID="bbd211bd84e745f77f4b74d6f1517684361226608b65c436d74c5a7d7e15bc26" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.493651 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" path="/var/lib/kubelet/pods/a792ce1c-1dfe-479b-a9e8-a96a861672a0/volumes" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.670532 4852 scope.go:117] "RemoveContainer" containerID="3f82b0084eef0b8949f74951e617a03c52f60e51c4815c8130f1e258fa227b84" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.783384 4852 scope.go:117] "RemoveContainer" containerID="5d27a78500353e928d3111a281b88b7cbd9e9bb3fea8cb1925e1eff87788016e" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.787027 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.804578 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.824213 4852 scope.go:117] "RemoveContainer" containerID="675351371d42be5732231c876d522e15356c0483d055226ac9029470a8952029" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.907964 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kmbn\" (UniqueName: \"kubernetes.io/projected/d8d97fec-53d7-4bb5-95ee-665b47fb821e-kube-api-access-5kmbn\") pod \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.908297 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8d97fec-53d7-4bb5-95ee-665b47fb821e-operator-scripts\") pod \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\" (UID: \"d8d97fec-53d7-4bb5-95ee-665b47fb821e\") " Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.908378 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdmn4\" (UniqueName: \"kubernetes.io/projected/c48eb14d-cbad-4154-a588-7271c69721af-kube-api-access-hdmn4\") pod \"c48eb14d-cbad-4154-a588-7271c69721af\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.908477 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c48eb14d-cbad-4154-a588-7271c69721af-operator-scripts\") pod \"c48eb14d-cbad-4154-a588-7271c69721af\" (UID: \"c48eb14d-cbad-4154-a588-7271c69721af\") " Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.909265 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c48eb14d-cbad-4154-a588-7271c69721af-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c48eb14d-cbad-4154-a588-7271c69721af" (UID: "c48eb14d-cbad-4154-a588-7271c69721af"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.909605 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8d97fec-53d7-4bb5-95ee-665b47fb821e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d8d97fec-53d7-4bb5-95ee-665b47fb821e" (UID: "d8d97fec-53d7-4bb5-95ee-665b47fb821e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.915574 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8d97fec-53d7-4bb5-95ee-665b47fb821e-kube-api-access-5kmbn" (OuterVolumeSpecName: "kube-api-access-5kmbn") pod "d8d97fec-53d7-4bb5-95ee-665b47fb821e" (UID: "d8d97fec-53d7-4bb5-95ee-665b47fb821e"). InnerVolumeSpecName "kube-api-access-5kmbn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.915605 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused" Jan 29 12:24:55 crc kubenswrapper[4852]: I0129 12:24:55.915747 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c48eb14d-cbad-4154-a588-7271c69721af-kube-api-access-hdmn4" (OuterVolumeSpecName: "kube-api-access-hdmn4") pod "c48eb14d-cbad-4154-a588-7271c69721af" (UID: "c48eb14d-cbad-4154-a588-7271c69721af"). InnerVolumeSpecName "kube-api-access-hdmn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.011186 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c48eb14d-cbad-4154-a588-7271c69721af-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.011226 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kmbn\" (UniqueName: \"kubernetes.io/projected/d8d97fec-53d7-4bb5-95ee-665b47fb821e-kube-api-access-5kmbn\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.011239 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8d97fec-53d7-4bb5-95ee-665b47fb821e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.011248 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdmn4\" (UniqueName: \"kubernetes.io/projected/c48eb14d-cbad-4154-a588-7271c69721af-kube-api-access-hdmn4\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.295569 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-1039-account-create-update-887rr" event={"ID":"d8d97fec-53d7-4bb5-95ee-665b47fb821e","Type":"ContainerDied","Data":"6d0f9553034ff9e16b774788f6b6a340757529b4a857637f19e32975549665f1"} Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.295668 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d0f9553034ff9e16b774788f6b6a340757529b4a857637f19e32975549665f1" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.295756 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-1039-account-create-update-887rr" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.300792 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-ckqhp" event={"ID":"c48eb14d-cbad-4154-a588-7271c69721af","Type":"ContainerDied","Data":"d0beff7cc6fb693708590b1e65f67f6f5c5ac01945439fccc001d73f1014c42b"} Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.300833 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0beff7cc6fb693708590b1e65f67f6f5c5ac01945439fccc001d73f1014c42b" Jan 29 12:24:56 crc kubenswrapper[4852]: I0129 12:24:56.300899 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-ckqhp" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.108257 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-tppch"] Jan 29 12:24:57 crc kubenswrapper[4852]: E0129 12:24:57.109457 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="extract-utilities" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109477 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="extract-utilities" Jan 29 12:24:57 crc kubenswrapper[4852]: E0129 12:24:57.109496 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="extract-content" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109502 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="extract-content" Jan 29 12:24:57 crc kubenswrapper[4852]: E0129 12:24:57.109520 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c48eb14d-cbad-4154-a588-7271c69721af" containerName="mariadb-database-create" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109528 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c48eb14d-cbad-4154-a588-7271c69721af" containerName="mariadb-database-create" Jan 29 12:24:57 crc kubenswrapper[4852]: E0129 12:24:57.109560 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8d97fec-53d7-4bb5-95ee-665b47fb821e" containerName="mariadb-account-create-update" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109567 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8d97fec-53d7-4bb5-95ee-665b47fb821e" containerName="mariadb-account-create-update" Jan 29 12:24:57 crc kubenswrapper[4852]: E0129 12:24:57.109609 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="registry-server" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109626 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="registry-server" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109863 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c48eb14d-cbad-4154-a588-7271c69721af" containerName="mariadb-database-create" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109888 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8d97fec-53d7-4bb5-95ee-665b47fb821e" containerName="mariadb-account-create-update" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.109898 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a792ce1c-1dfe-479b-a9e8-a96a861672a0" containerName="registry-server" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.110653 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.112573 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.114220 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-mkkdf" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.118214 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-tppch"] Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.232527 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnxt8\" (UniqueName: \"kubernetes.io/projected/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-kube-api-access-jnxt8\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.232670 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-combined-ca-bundle\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.232780 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-config-data\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.334471 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-config-data\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.334880 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnxt8\" (UniqueName: \"kubernetes.io/projected/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-kube-api-access-jnxt8\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.334970 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-combined-ca-bundle\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.345198 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-combined-ca-bundle\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.345671 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-config-data\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" 
Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.355280 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnxt8\" (UniqueName: \"kubernetes.io/projected/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-kube-api-access-jnxt8\") pod \"heat-db-sync-tppch\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.426886 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-tppch" Jan 29 12:24:57 crc kubenswrapper[4852]: I0129 12:24:57.963898 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-tppch"] Jan 29 12:24:58 crc kubenswrapper[4852]: I0129 12:24:58.318326 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-tppch" event={"ID":"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d","Type":"ContainerStarted","Data":"750358c6fa250cad1590c252add126a5e0dfa145f86a6c73963022aeaf8f1ac3"} Jan 29 12:25:00 crc kubenswrapper[4852]: I0129 12:25:00.676356 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:25:00 crc kubenswrapper[4852]: I0129 12:25:00.677028 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:25:05 crc kubenswrapper[4852]: I0129 12:25:05.918085 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6978b99b85-59x2d" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.110:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.110:8080: connect: connection refused" Jan 29 12:25:05 crc kubenswrapper[4852]: I0129 12:25:05.918930 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:25:06 crc kubenswrapper[4852]: I0129 12:25:06.400302 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-tppch" event={"ID":"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d","Type":"ContainerStarted","Data":"d02d7c6a1b9376ce7b946e455057c54c8865e649823a32179521c8249cf4afb5"} Jan 29 12:25:06 crc kubenswrapper[4852]: I0129 12:25:06.424615 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-tppch" podStartSLOduration=1.5779417279999999 podStartE2EDuration="9.424568473s" podCreationTimestamp="2026-01-29 12:24:57 +0000 UTC" firstStartedPulling="2026-01-29 12:24:57.983896782 +0000 UTC m=+6195.201227916" lastFinishedPulling="2026-01-29 12:25:05.830523527 +0000 UTC m=+6203.047854661" observedRunningTime="2026-01-29 12:25:06.418638049 +0000 UTC m=+6203.635969253" watchObservedRunningTime="2026-01-29 12:25:06.424568473 +0000 UTC m=+6203.641899647" Jan 29 12:25:08 crc kubenswrapper[4852]: I0129 12:25:08.428805 4852 generic.go:334] "Generic (PLEG): container finished" podID="fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" containerID="d02d7c6a1b9376ce7b946e455057c54c8865e649823a32179521c8249cf4afb5" exitCode=0 Jan 29 12:25:08 crc kubenswrapper[4852]: I0129 12:25:08.428918 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-tppch" event={"ID":"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d","Type":"ContainerDied","Data":"d02d7c6a1b9376ce7b946e455057c54c8865e649823a32179521c8249cf4afb5"} Jan 29 12:25:08 crc kubenswrapper[4852]: I0129 12:25:08.895027 4852 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-n5tlh"] Jan 29 12:25:08 crc kubenswrapper[4852]: I0129 12:25:08.898559 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:08 crc kubenswrapper[4852]: I0129 12:25:08.918528 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n5tlh"] Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.007800 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-utilities\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.007977 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fvjd\" (UniqueName: \"kubernetes.io/projected/217bcf06-4848-41c0-9a71-6cf0fdf5e554-kube-api-access-7fvjd\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.008031 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-catalog-content\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.110289 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-utilities\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.110568 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fvjd\" (UniqueName: \"kubernetes.io/projected/217bcf06-4848-41c0-9a71-6cf0fdf5e554-kube-api-access-7fvjd\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.110691 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-catalog-content\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.111926 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-catalog-content\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.112573 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-utilities\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " 
pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.152259 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fvjd\" (UniqueName: \"kubernetes.io/projected/217bcf06-4848-41c0-9a71-6cf0fdf5e554-kube-api-access-7fvjd\") pod \"redhat-operators-n5tlh\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.221940 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.724692 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n5tlh"] Jan 29 12:25:09 crc kubenswrapper[4852]: W0129 12:25:09.727574 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod217bcf06_4848_41c0_9a71_6cf0fdf5e554.slice/crio-9112d626f9d34669ef8f905b99df8f8263f8d9733c8c8876f98ca171e1d0ce09 WatchSource:0}: Error finding container 9112d626f9d34669ef8f905b99df8f8263f8d9733c8c8876f98ca171e1d0ce09: Status 404 returned error can't find the container with id 9112d626f9d34669ef8f905b99df8f8263f8d9733c8c8876f98ca171e1d0ce09 Jan 29 12:25:09 crc kubenswrapper[4852]: I0129 12:25:09.913631 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-tppch" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.034220 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnxt8\" (UniqueName: \"kubernetes.io/projected/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-kube-api-access-jnxt8\") pod \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.034284 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-combined-ca-bundle\") pod \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.034479 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-config-data\") pod \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\" (UID: \"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d\") " Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.047502 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-kube-api-access-jnxt8" (OuterVolumeSpecName: "kube-api-access-jnxt8") pod "fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" (UID: "fd41f0b6-b8cb-489a-89a1-9daa4f8f881d"). InnerVolumeSpecName "kube-api-access-jnxt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.074803 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" (UID: "fd41f0b6-b8cb-489a-89a1-9daa4f8f881d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.134718 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-config-data" (OuterVolumeSpecName: "config-data") pod "fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" (UID: "fd41f0b6-b8cb-489a-89a1-9daa4f8f881d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.137672 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnxt8\" (UniqueName: \"kubernetes.io/projected/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-kube-api-access-jnxt8\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.137706 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.137716 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.450260 4852 generic.go:334] "Generic (PLEG): container finished" podID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerID="0f55abbd93b4226b1ebf16db1880aa58ffe2bb8d5d16d82970d78f577ca6fbbc" exitCode=0 Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.450384 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerDied","Data":"0f55abbd93b4226b1ebf16db1880aa58ffe2bb8d5d16d82970d78f577ca6fbbc"} Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.450685 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerStarted","Data":"9112d626f9d34669ef8f905b99df8f8263f8d9733c8c8876f98ca171e1d0ce09"} Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.452616 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-tppch" event={"ID":"fd41f0b6-b8cb-489a-89a1-9daa4f8f881d","Type":"ContainerDied","Data":"750358c6fa250cad1590c252add126a5e0dfa145f86a6c73963022aeaf8f1ac3"} Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.452682 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="750358c6fa250cad1590c252add126a5e0dfa145f86a6c73963022aeaf8f1ac3" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.452820 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-tppch" Jan 29 12:25:10 crc kubenswrapper[4852]: I0129 12:25:10.677825 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-545cbcccb9-rs75k" podUID="1552da5b-1c75-4b36-9e71-ea8cd1c9af06" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.115:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.115:8080: connect: connection refused" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.329873 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.467164 4852 generic.go:334] "Generic (PLEG): container finished" podID="39274d66-6aa0-4214-830d-044100a544ca" containerID="6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c" exitCode=137 Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.467529 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6978b99b85-59x2d" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.469278 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39274d66-6aa0-4214-830d-044100a544ca-horizon-secret-key\") pod \"39274d66-6aa0-4214-830d-044100a544ca\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.469378 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-config-data\") pod \"39274d66-6aa0-4214-830d-044100a544ca\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.469479 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fd4h9\" (UniqueName: \"kubernetes.io/projected/39274d66-6aa0-4214-830d-044100a544ca-kube-api-access-fd4h9\") pod \"39274d66-6aa0-4214-830d-044100a544ca\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.469612 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-scripts\") pod \"39274d66-6aa0-4214-830d-044100a544ca\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.469642 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39274d66-6aa0-4214-830d-044100a544ca-logs\") pod \"39274d66-6aa0-4214-830d-044100a544ca\" (UID: \"39274d66-6aa0-4214-830d-044100a544ca\") " Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.470774 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39274d66-6aa0-4214-830d-044100a544ca-logs" (OuterVolumeSpecName: "logs") pod "39274d66-6aa0-4214-830d-044100a544ca" (UID: "39274d66-6aa0-4214-830d-044100a544ca"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.479815 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39274d66-6aa0-4214-830d-044100a544ca-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "39274d66-6aa0-4214-830d-044100a544ca" (UID: "39274d66-6aa0-4214-830d-044100a544ca"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.480786 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39274d66-6aa0-4214-830d-044100a544ca-kube-api-access-fd4h9" (OuterVolumeSpecName: "kube-api-access-fd4h9") pod "39274d66-6aa0-4214-830d-044100a544ca" (UID: "39274d66-6aa0-4214-830d-044100a544ca"). InnerVolumeSpecName "kube-api-access-fd4h9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.495372 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-scripts" (OuterVolumeSpecName: "scripts") pod "39274d66-6aa0-4214-830d-044100a544ca" (UID: "39274d66-6aa0-4214-830d-044100a544ca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.503546 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-config-data" (OuterVolumeSpecName: "config-data") pod "39274d66-6aa0-4214-830d-044100a544ca" (UID: "39274d66-6aa0-4214-830d-044100a544ca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.571945 4852 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39274d66-6aa0-4214-830d-044100a544ca-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.571981 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.571993 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fd4h9\" (UniqueName: \"kubernetes.io/projected/39274d66-6aa0-4214-830d-044100a544ca-kube-api-access-fd4h9\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.572002 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39274d66-6aa0-4214-830d-044100a544ca-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.572013 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39274d66-6aa0-4214-830d-044100a544ca-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.593898 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6978b99b85-59x2d" event={"ID":"39274d66-6aa0-4214-830d-044100a544ca","Type":"ContainerDied","Data":"6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c"} Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.593962 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6978b99b85-59x2d" event={"ID":"39274d66-6aa0-4214-830d-044100a544ca","Type":"ContainerDied","Data":"e744fc61e0cc96ef5ca4ab62e7aeadadbe736c46d8a3f370fa327a65666eb220"} Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.594011 4852 scope.go:117] "RemoveContainer" containerID="b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.832170 4852 scope.go:117] "RemoveContainer" containerID="6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.846275 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-5fbc98c975-bc5tz"] Jan 29 12:25:11 crc kubenswrapper[4852]: E0129 12:25:11.846971 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" 
containerName="heat-db-sync" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.847009 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" containerName="heat-db-sync" Jan 29 12:25:11 crc kubenswrapper[4852]: E0129 12:25:11.847027 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.847034 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" Jan 29 12:25:11 crc kubenswrapper[4852]: E0129 12:25:11.847043 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon-log" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.847050 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon-log" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.847340 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" containerName="heat-db-sync" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.847369 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.847403 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="39274d66-6aa0-4214-830d-044100a544ca" containerName="horizon-log" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.848289 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.856232 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-mkkdf" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.856299 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.856843 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.897261 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6978b99b85-59x2d"] Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.941869 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6978b99b85-59x2d"] Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.967269 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5fbc98c975-bc5tz"] Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.979265 4852 scope.go:117] "RemoveContainer" containerID="b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7" Jan 29 12:25:11 crc kubenswrapper[4852]: E0129 12:25:11.979770 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7\": container with ID starting with b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7 not found: ID does not exist" containerID="b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.979798 4852 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7"} err="failed to get container status \"b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7\": rpc error: code = NotFound desc = could not find container \"b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7\": container with ID starting with b6aa7b9a6416ec06a32b5f1b97a6af82793c3f9a96ae309f2ae0fac571d5c2f7 not found: ID does not exist" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.979834 4852 scope.go:117] "RemoveContainer" containerID="6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c" Jan 29 12:25:11 crc kubenswrapper[4852]: E0129 12:25:11.980046 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c\": container with ID starting with 6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c not found: ID does not exist" containerID="6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.980067 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c"} err="failed to get container status \"6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c\": rpc error: code = NotFound desc = could not find container \"6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c\": container with ID starting with 6eb89f337135360ab5680460f3ef78c54381992ca45ae07531bc1a8ed98a384c not found: ID does not exist" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.985352 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-bdb79664c-nwljl"] Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.987084 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.991674 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.993019 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-combined-ca-bundle\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.993120 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxfr2\" (UniqueName: \"kubernetes.io/projected/1c8bf67c-6118-43f4-8de8-ee434145007c-kube-api-access-kxfr2\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.993185 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-config-data\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.993423 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-config-data-custom\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:11 crc kubenswrapper[4852]: I0129 12:25:11.994814 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-bdb79664c-nwljl"] Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.067777 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5fbdc7b9-2dk72"] Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.069361 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5fbdc7b9-2dk72"] Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.069548 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.087808 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102481 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-config-data-custom\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102646 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxfr2\" (UniqueName: \"kubernetes.io/projected/1c8bf67c-6118-43f4-8de8-ee434145007c-kube-api-access-kxfr2\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102691 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-config-data\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102743 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-config-data\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102780 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf2nk\" (UniqueName: \"kubernetes.io/projected/ac1eac8d-5358-4cc0-940f-36a201478932-kube-api-access-zf2nk\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102862 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-config-data-custom\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102917 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-combined-ca-bundle\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.102953 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-combined-ca-bundle\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.111422 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-config-data\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.126372 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-combined-ca-bundle\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.126433 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c8bf67c-6118-43f4-8de8-ee434145007c-config-data-custom\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.150243 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxfr2\" (UniqueName: \"kubernetes.io/projected/1c8bf67c-6118-43f4-8de8-ee434145007c-kube-api-access-kxfr2\") pod \"heat-engine-5fbc98c975-bc5tz\" (UID: \"1c8bf67c-6118-43f4-8de8-ee434145007c\") " pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.201886 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204609 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-combined-ca-bundle\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204676 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-config-data-custom\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204745 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-config-data-custom\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204783 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-config-data\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204819 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-combined-ca-bundle\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " 
pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204864 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hwwm\" (UniqueName: \"kubernetes.io/projected/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-kube-api-access-5hwwm\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204933 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-config-data\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.204988 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf2nk\" (UniqueName: \"kubernetes.io/projected/ac1eac8d-5358-4cc0-940f-36a201478932-kube-api-access-zf2nk\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.209214 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-combined-ca-bundle\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.209704 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-config-data-custom\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.211665 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac1eac8d-5358-4cc0-940f-36a201478932-config-data\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.232339 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf2nk\" (UniqueName: \"kubernetes.io/projected/ac1eac8d-5358-4cc0-940f-36a201478932-kube-api-access-zf2nk\") pod \"heat-api-bdb79664c-nwljl\" (UID: \"ac1eac8d-5358-4cc0-940f-36a201478932\") " pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.309165 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-config-data-custom\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.309947 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-config-data\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 
12:25:12.309977 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-combined-ca-bundle\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.310030 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hwwm\" (UniqueName: \"kubernetes.io/projected/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-kube-api-access-5hwwm\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.315427 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-config-data\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.316787 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-combined-ca-bundle\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.316789 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-config-data-custom\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.331213 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.334695 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hwwm\" (UniqueName: \"kubernetes.io/projected/6bc0a69d-dab1-4196-8f3f-924b88ea5a79-kube-api-access-5hwwm\") pod \"heat-cfnapi-5fbdc7b9-2dk72\" (UID: \"6bc0a69d-dab1-4196-8f3f-924b88ea5a79\") " pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.483740 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.487807 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerStarted","Data":"97084f378b0bc134c9d8dbd0a24332eea492c3a9640d920b48590e19ac83d9d9"} Jan 29 12:25:12 crc kubenswrapper[4852]: I0129 12:25:12.835871 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5fbc98c975-bc5tz"] Jan 29 12:25:13 crc kubenswrapper[4852]: I0129 12:25:13.012470 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-bdb79664c-nwljl"] Jan 29 12:25:13 crc kubenswrapper[4852]: I0129 12:25:13.474232 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39274d66-6aa0-4214-830d-044100a544ca" path="/var/lib/kubelet/pods/39274d66-6aa0-4214-830d-044100a544ca/volumes" Jan 29 12:25:13 crc kubenswrapper[4852]: I0129 12:25:13.501610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5fbc98c975-bc5tz" event={"ID":"1c8bf67c-6118-43f4-8de8-ee434145007c","Type":"ContainerStarted","Data":"2474824aa47e6acb27184222baaa1e60dee453effb412193336fd36f6c9fac1c"} Jan 29 12:25:13 crc kubenswrapper[4852]: I0129 12:25:13.502856 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-bdb79664c-nwljl" event={"ID":"ac1eac8d-5358-4cc0-940f-36a201478932","Type":"ContainerStarted","Data":"0f829f2b46c79b46a5e717635f9429fc3733b6655c2f683e502f591fff9a73fc"} Jan 29 12:25:13 crc kubenswrapper[4852]: I0129 12:25:13.571979 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5fbdc7b9-2dk72"] Jan 29 12:25:14 crc kubenswrapper[4852]: I0129 12:25:14.516734 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" event={"ID":"6bc0a69d-dab1-4196-8f3f-924b88ea5a79","Type":"ContainerStarted","Data":"2466625559a4a3e3a524ce91f0aa9c3c174e12fd67312a48631def403d84fa07"} Jan 29 12:25:14 crc kubenswrapper[4852]: I0129 12:25:14.519102 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5fbc98c975-bc5tz" event={"ID":"1c8bf67c-6118-43f4-8de8-ee434145007c","Type":"ContainerStarted","Data":"94076e25df5df2cb45beacdb7fe618659d823295adb99ae08e176ef8542b7739"} Jan 29 12:25:14 crc kubenswrapper[4852]: I0129 12:25:14.520527 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:14 crc kubenswrapper[4852]: I0129 12:25:14.523381 4852 generic.go:334] "Generic (PLEG): container finished" podID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerID="97084f378b0bc134c9d8dbd0a24332eea492c3a9640d920b48590e19ac83d9d9" exitCode=0 Jan 29 12:25:14 crc kubenswrapper[4852]: I0129 12:25:14.523441 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerDied","Data":"97084f378b0bc134c9d8dbd0a24332eea492c3a9640d920b48590e19ac83d9d9"} Jan 29 12:25:14 crc kubenswrapper[4852]: I0129 12:25:14.546721 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-5fbc98c975-bc5tz" podStartSLOduration=3.546697537 podStartE2EDuration="3.546697537s" podCreationTimestamp="2026-01-29 12:25:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-29 12:25:14.534046028 +0000 UTC m=+6211.751377162" watchObservedRunningTime="2026-01-29 12:25:14.546697537 +0000 UTC m=+6211.764028681" Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.635148 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" event={"ID":"6bc0a69d-dab1-4196-8f3f-924b88ea5a79","Type":"ContainerStarted","Data":"19d9d9943a192069f3e71b1811e4201b9ebee5dda6e87ddf2b1adeae331da3b5"} Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.636901 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.652772 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-bdb79664c-nwljl" event={"ID":"ac1eac8d-5358-4cc0-940f-36a201478932","Type":"ContainerStarted","Data":"9da3588e92571a1b205a7f988cc1e0273ad6c22a31485a2d92f7e1d1e368cf91"} Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.653653 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.674838 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerStarted","Data":"1f4cebec076485946127332ed10da33e7ab3dedc03606ef2361c38dfb8e29246"} Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.683768 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" podStartSLOduration=2.275444323 podStartE2EDuration="10.683745815s" podCreationTimestamp="2026-01-29 12:25:12 +0000 UTC" firstStartedPulling="2026-01-29 12:25:13.567634532 +0000 UTC m=+6210.784965666" lastFinishedPulling="2026-01-29 12:25:21.975936024 +0000 UTC m=+6219.193267158" observedRunningTime="2026-01-29 12:25:22.659406912 +0000 UTC m=+6219.876738046" watchObservedRunningTime="2026-01-29 12:25:22.683745815 +0000 UTC m=+6219.901076949" Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.697054 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-bdb79664c-nwljl" podStartSLOduration=3.117829228 podStartE2EDuration="11.697029918s" podCreationTimestamp="2026-01-29 12:25:11 +0000 UTC" firstStartedPulling="2026-01-29 12:25:13.038310974 +0000 UTC m=+6210.255642108" lastFinishedPulling="2026-01-29 12:25:21.617511644 +0000 UTC m=+6218.834842798" observedRunningTime="2026-01-29 12:25:22.681986172 +0000 UTC m=+6219.899317306" watchObservedRunningTime="2026-01-29 12:25:22.697029918 +0000 UTC m=+6219.914361052" Jan 29 12:25:22 crc kubenswrapper[4852]: I0129 12:25:22.754103 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-n5tlh" podStartSLOduration=3.241161589 podStartE2EDuration="14.754043399s" podCreationTimestamp="2026-01-29 12:25:08 +0000 UTC" firstStartedPulling="2026-01-29 12:25:10.464531991 +0000 UTC m=+6207.681863125" lastFinishedPulling="2026-01-29 12:25:21.977413801 +0000 UTC m=+6219.194744935" observedRunningTime="2026-01-29 12:25:22.748152056 +0000 UTC m=+6219.965483190" watchObservedRunningTime="2026-01-29 12:25:22.754043399 +0000 UTC m=+6219.971374543" Jan 29 12:25:23 crc kubenswrapper[4852]: I0129 12:25:23.274120 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:25:24 crc 
kubenswrapper[4852]: I0129 12:25:24.958632 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-545cbcccb9-rs75k" Jan 29 12:25:25 crc kubenswrapper[4852]: I0129 12:25:25.038034 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85446f64c7-pbx75"] Jan 29 12:25:25 crc kubenswrapper[4852]: I0129 12:25:25.038363 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85446f64c7-pbx75" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon-log" containerID="cri-o://a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf" gracePeriod=30 Jan 29 12:25:25 crc kubenswrapper[4852]: I0129 12:25:25.038452 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85446f64c7-pbx75" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" containerID="cri-o://42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd" gracePeriod=30 Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.043938 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-5a72-account-create-update-ll6z9"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.059749 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-dq45p"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.071366 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-dq45p"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.081442 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-5a72-account-create-update-ll6z9"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.091358 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-x6fcd"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.100427 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-x6fcd"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.111083 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dvpbl"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.123533 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-dvpbl"] Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.474479 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d108b5f-e63a-4812-ae11-eaab4c51fba9" path="/var/lib/kubelet/pods/0d108b5f-e63a-4812-ae11-eaab4c51fba9/volumes" Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.499785 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c3ea17c-df9b-4eb0-8929-9008639d7c79" path="/var/lib/kubelet/pods/2c3ea17c-df9b-4eb0-8929-9008639d7c79/volumes" Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.500411 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="549d7cc5-e0ca-4266-b507-a970c408621f" path="/var/lib/kubelet/pods/549d7cc5-e0ca-4266-b507-a970c408621f/volumes" Jan 29 12:25:27 crc kubenswrapper[4852]: I0129 12:25:27.500960 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb22d7f4-2e3f-4128-acad-5bff9584ebff" path="/var/lib/kubelet/pods/fb22d7f4-2e3f-4128-acad-5bff9584ebff/volumes" Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.040773 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-742c-account-create-update-chpcv"] Jan 29 12:25:28 crc 
kubenswrapper[4852]: I0129 12:25:28.051662 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-742c-account-create-update-chpcv"] Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.063567 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-fc77-account-create-update-9dmsd"] Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.075728 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-fc77-account-create-update-9dmsd"] Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.195459 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-85446f64c7-pbx75" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.111:8080/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:60486->10.217.1.111:8080: read: connection reset by peer" Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.734916 4852 generic.go:334] "Generic (PLEG): container finished" podID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerID="42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd" exitCode=0 Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.734980 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85446f64c7-pbx75" event={"ID":"dbaa621d-6bac-4dbc-a5ea-fec780103dde","Type":"ContainerDied","Data":"42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd"} Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.832914 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5fbdc7b9-2dk72" Jan 29 12:25:28 crc kubenswrapper[4852]: I0129 12:25:28.844312 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-bdb79664c-nwljl" Jan 29 12:25:29 crc kubenswrapper[4852]: I0129 12:25:29.223131 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:29 crc kubenswrapper[4852]: I0129 12:25:29.223202 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:29 crc kubenswrapper[4852]: I0129 12:25:29.474887 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792" path="/var/lib/kubelet/pods/37d8f8ac-2c54-4c48-b3e4-c9dfc4bb4792/volumes" Jan 29 12:25:29 crc kubenswrapper[4852]: I0129 12:25:29.475490 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62acb4d3-27bc-466d-af1f-35d451699565" path="/var/lib/kubelet/pods/62acb4d3-27bc-466d-af1f-35d451699565/volumes" Jan 29 12:25:30 crc kubenswrapper[4852]: I0129 12:25:30.269324 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n5tlh" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" probeResult="failure" output=< Jan 29 12:25:30 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:25:30 crc kubenswrapper[4852]: > Jan 29 12:25:32 crc kubenswrapper[4852]: I0129 12:25:32.251099 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-5fbc98c975-bc5tz" Jan 29 12:25:36 crc kubenswrapper[4852]: I0129 12:25:36.611035 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-85446f64c7-pbx75" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" 
probeResult="failure" output="Get \"http://10.217.1.111:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.111:8080: connect: connection refused" Jan 29 12:25:38 crc kubenswrapper[4852]: I0129 12:25:38.053605 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qlz62"] Jan 29 12:25:38 crc kubenswrapper[4852]: I0129 12:25:38.061852 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qlz62"] Jan 29 12:25:39 crc kubenswrapper[4852]: I0129 12:25:39.493858 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a883122-db7e-4602-b475-b2763b8e9e41" path="/var/lib/kubelet/pods/3a883122-db7e-4602-b475-b2763b8e9e41/volumes" Jan 29 12:25:40 crc kubenswrapper[4852]: I0129 12:25:40.271767 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n5tlh" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" probeResult="failure" output=< Jan 29 12:25:40 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:25:40 crc kubenswrapper[4852]: > Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.117033 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk"] Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.119557 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.121451 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.192404 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk"] Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.206269 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.206570 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.206772 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqhl5\" (UniqueName: \"kubernetes.io/projected/2bc8818d-aaba-4852-8a76-7933da07170d-kube-api-access-dqhl5\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.310288 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.310408 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.310450 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqhl5\" (UniqueName: \"kubernetes.io/projected/2bc8818d-aaba-4852-8a76-7933da07170d-kube-api-access-dqhl5\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.311528 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.311793 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.335970 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqhl5\" (UniqueName: \"kubernetes.io/projected/2bc8818d-aaba-4852-8a76-7933da07170d-kube-api-access-dqhl5\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:44 crc kubenswrapper[4852]: I0129 12:25:44.438325 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:45 crc kubenswrapper[4852]: I0129 12:25:45.011654 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk"] Jan 29 12:25:45 crc kubenswrapper[4852]: W0129 12:25:45.018208 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2bc8818d_aaba_4852_8a76_7933da07170d.slice/crio-5f081924f2773a243fbe4815c0de0e6a49c28e60768ecc0fa5857358f6360eda WatchSource:0}: Error finding container 5f081924f2773a243fbe4815c0de0e6a49c28e60768ecc0fa5857358f6360eda: Status 404 returned error can't find the container with id 5f081924f2773a243fbe4815c0de0e6a49c28e60768ecc0fa5857358f6360eda Jan 29 12:25:45 crc kubenswrapper[4852]: I0129 12:25:45.927898 4852 generic.go:334] "Generic (PLEG): container finished" podID="2bc8818d-aaba-4852-8a76-7933da07170d" containerID="0263599f0ee76bc6b1cc762460fefbb97e0899bc44d6a1718a7f352b840063c4" exitCode=0 Jan 29 12:25:45 crc kubenswrapper[4852]: I0129 12:25:45.928244 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" event={"ID":"2bc8818d-aaba-4852-8a76-7933da07170d","Type":"ContainerDied","Data":"0263599f0ee76bc6b1cc762460fefbb97e0899bc44d6a1718a7f352b840063c4"} Jan 29 12:25:45 crc kubenswrapper[4852]: I0129 12:25:45.928292 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" event={"ID":"2bc8818d-aaba-4852-8a76-7933da07170d","Type":"ContainerStarted","Data":"5f081924f2773a243fbe4815c0de0e6a49c28e60768ecc0fa5857358f6360eda"} Jan 29 12:25:46 crc kubenswrapper[4852]: I0129 12:25:46.610546 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-85446f64c7-pbx75" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.111:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.111:8080: connect: connection refused" Jan 29 12:25:46 crc kubenswrapper[4852]: I0129 12:25:46.611010 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:25:47 crc kubenswrapper[4852]: I0129 12:25:47.949854 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" event={"ID":"2bc8818d-aaba-4852-8a76-7933da07170d","Type":"ContainerStarted","Data":"782312008b938f801589520285d805e5f3e0492c0425f1ee299ff807b96ca122"} Jan 29 12:25:48 crc kubenswrapper[4852]: I0129 12:25:48.963741 4852 generic.go:334] "Generic (PLEG): container finished" podID="2bc8818d-aaba-4852-8a76-7933da07170d" containerID="782312008b938f801589520285d805e5f3e0492c0425f1ee299ff807b96ca122" exitCode=0 Jan 29 12:25:48 crc kubenswrapper[4852]: I0129 12:25:48.963818 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" event={"ID":"2bc8818d-aaba-4852-8a76-7933da07170d","Type":"ContainerDied","Data":"782312008b938f801589520285d805e5f3e0492c0425f1ee299ff807b96ca122"} Jan 29 12:25:49 crc kubenswrapper[4852]: I0129 12:25:49.974651 4852 generic.go:334] "Generic (PLEG): container finished" podID="2bc8818d-aaba-4852-8a76-7933da07170d" 
containerID="6371173f684c6f31a3e9b0745b3f4d190a8d9e80f14c737596d3c796caa74856" exitCode=0 Jan 29 12:25:49 crc kubenswrapper[4852]: I0129 12:25:49.974834 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" event={"ID":"2bc8818d-aaba-4852-8a76-7933da07170d","Type":"ContainerDied","Data":"6371173f684c6f31a3e9b0745b3f4d190a8d9e80f14c737596d3c796caa74856"} Jan 29 12:25:50 crc kubenswrapper[4852]: I0129 12:25:50.271154 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n5tlh" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" probeResult="failure" output=< Jan 29 12:25:50 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:25:50 crc kubenswrapper[4852]: > Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.374812 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.475307 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-bundle\") pod \"2bc8818d-aaba-4852-8a76-7933da07170d\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.475527 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-util\") pod \"2bc8818d-aaba-4852-8a76-7933da07170d\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.475578 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqhl5\" (UniqueName: \"kubernetes.io/projected/2bc8818d-aaba-4852-8a76-7933da07170d-kube-api-access-dqhl5\") pod \"2bc8818d-aaba-4852-8a76-7933da07170d\" (UID: \"2bc8818d-aaba-4852-8a76-7933da07170d\") " Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.478220 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-bundle" (OuterVolumeSpecName: "bundle") pod "2bc8818d-aaba-4852-8a76-7933da07170d" (UID: "2bc8818d-aaba-4852-8a76-7933da07170d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.480669 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bc8818d-aaba-4852-8a76-7933da07170d-kube-api-access-dqhl5" (OuterVolumeSpecName: "kube-api-access-dqhl5") pod "2bc8818d-aaba-4852-8a76-7933da07170d" (UID: "2bc8818d-aaba-4852-8a76-7933da07170d"). InnerVolumeSpecName "kube-api-access-dqhl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.485569 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-util" (OuterVolumeSpecName: "util") pod "2bc8818d-aaba-4852-8a76-7933da07170d" (UID: "2bc8818d-aaba-4852-8a76-7933da07170d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.580079 4852 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.580120 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqhl5\" (UniqueName: \"kubernetes.io/projected/2bc8818d-aaba-4852-8a76-7933da07170d-kube-api-access-dqhl5\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.580132 4852 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bc8818d-aaba-4852-8a76-7933da07170d-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.997658 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" event={"ID":"2bc8818d-aaba-4852-8a76-7933da07170d","Type":"ContainerDied","Data":"5f081924f2773a243fbe4815c0de0e6a49c28e60768ecc0fa5857358f6360eda"} Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.997707 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f081924f2773a243fbe4815c0de0e6a49c28e60768ecc0fa5857358f6360eda" Jan 29 12:25:51 crc kubenswrapper[4852]: I0129 12:25:51.997780 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk" Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.902981 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.961420 4852 scope.go:117] "RemoveContainer" containerID="cbdf9ac17b2093fe58616ffade16c7c0346d3035d957e0515dd48f990d67c315" Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.973068 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbaa621d-6bac-4dbc-a5ea-fec780103dde-logs\") pod \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.973129 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-config-data\") pod \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.973219 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbaa621d-6bac-4dbc-a5ea-fec780103dde-horizon-secret-key\") pod \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.973282 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rwm9\" (UniqueName: \"kubernetes.io/projected/dbaa621d-6bac-4dbc-a5ea-fec780103dde-kube-api-access-8rwm9\") pod \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.973372 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-scripts\") pod \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\" (UID: \"dbaa621d-6bac-4dbc-a5ea-fec780103dde\") " Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.973857 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbaa621d-6bac-4dbc-a5ea-fec780103dde-logs" (OuterVolumeSpecName: "logs") pod "dbaa621d-6bac-4dbc-a5ea-fec780103dde" (UID: "dbaa621d-6bac-4dbc-a5ea-fec780103dde"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.989390 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbaa621d-6bac-4dbc-a5ea-fec780103dde-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "dbaa621d-6bac-4dbc-a5ea-fec780103dde" (UID: "dbaa621d-6bac-4dbc-a5ea-fec780103dde"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:55 crc kubenswrapper[4852]: I0129 12:25:55.992918 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbaa621d-6bac-4dbc-a5ea-fec780103dde-kube-api-access-8rwm9" (OuterVolumeSpecName: "kube-api-access-8rwm9") pod "dbaa621d-6bac-4dbc-a5ea-fec780103dde" (UID: "dbaa621d-6bac-4dbc-a5ea-fec780103dde"). InnerVolumeSpecName "kube-api-access-8rwm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.018335 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-scripts" (OuterVolumeSpecName: "scripts") pod "dbaa621d-6bac-4dbc-a5ea-fec780103dde" (UID: "dbaa621d-6bac-4dbc-a5ea-fec780103dde"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.077320 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.077351 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbaa621d-6bac-4dbc-a5ea-fec780103dde-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.077364 4852 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbaa621d-6bac-4dbc-a5ea-fec780103dde-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.077376 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rwm9\" (UniqueName: \"kubernetes.io/projected/dbaa621d-6bac-4dbc-a5ea-fec780103dde-kube-api-access-8rwm9\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.087453 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-config-data" (OuterVolumeSpecName: "config-data") pod "dbaa621d-6bac-4dbc-a5ea-fec780103dde" (UID: "dbaa621d-6bac-4dbc-a5ea-fec780103dde"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.123925 4852 scope.go:117] "RemoveContainer" containerID="c7ec2c2a09b399f9cadeae8b5a7035024cf020020cf6af995fdf1864715ac53d" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.142867 4852 generic.go:334] "Generic (PLEG): container finished" podID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerID="a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf" exitCode=137 Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.142920 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85446f64c7-pbx75" event={"ID":"dbaa621d-6bac-4dbc-a5ea-fec780103dde","Type":"ContainerDied","Data":"a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf"} Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.142954 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85446f64c7-pbx75" event={"ID":"dbaa621d-6bac-4dbc-a5ea-fec780103dde","Type":"ContainerDied","Data":"d19824b404be85030fe6bfd4755eddbd591f570a05d12f26bb45d0f801d90857"} Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.142975 4852 scope.go:117] "RemoveContainer" containerID="42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.143239 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85446f64c7-pbx75" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.179377 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbaa621d-6bac-4dbc-a5ea-fec780103dde-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.226982 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85446f64c7-pbx75"] Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.234093 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-85446f64c7-pbx75"] Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.440889 4852 scope.go:117] "RemoveContainer" containerID="a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.466604 4852 scope.go:117] "RemoveContainer" containerID="9dd4d775f342b430f2ec9a1b3e48f1e433f665ccb9498108d98cb67709598515" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.573696 4852 scope.go:117] "RemoveContainer" containerID="42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd" Jan 29 12:25:56 crc kubenswrapper[4852]: E0129 12:25:56.580689 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd\": container with ID starting with 42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd not found: ID does not exist" containerID="42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.580730 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd"} err="failed to get container status \"42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd\": rpc error: code = NotFound desc = could not find container \"42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd\": container with ID starting 
with 42fb5ca66e3cc98dec73621b8a2e8c58f726ffbed549d87679a41ec55db6a9dd not found: ID does not exist" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.580756 4852 scope.go:117] "RemoveContainer" containerID="a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf" Jan 29 12:25:56 crc kubenswrapper[4852]: E0129 12:25:56.591738 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf\": container with ID starting with a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf not found: ID does not exist" containerID="a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.591790 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf"} err="failed to get container status \"a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf\": rpc error: code = NotFound desc = could not find container \"a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf\": container with ID starting with a0243886aa6a3f9c41bd7a704841fa672972cc60fba33ddc6eaa54211566d1bf not found: ID does not exist" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.633517 4852 scope.go:117] "RemoveContainer" containerID="f6f5ea1f9751a4325a9c6225c6c79ac765f8b59a1bf170e96463b8b0a1fe9b32" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.754515 4852 scope.go:117] "RemoveContainer" containerID="a2fda325225b67306e9adb00fb63c930298a7d27afd6ec1153cc0a58c0845af4" Jan 29 12:25:56 crc kubenswrapper[4852]: I0129 12:25:56.992076 4852 scope.go:117] "RemoveContainer" containerID="222d44b72ab581c36d25f18b1e87ee20d213b40a99b2f0c79580974d9d54fcc7" Jan 29 12:25:57 crc kubenswrapper[4852]: I0129 12:25:57.032544 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qtwww"] Jan 29 12:25:57 crc kubenswrapper[4852]: I0129 12:25:57.045777 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qtwww"] Jan 29 12:25:57 crc kubenswrapper[4852]: I0129 12:25:57.106090 4852 scope.go:117] "RemoveContainer" containerID="542047c4ada8f43f614a05e9be497edca18f831d1275ce9280591ff508071350" Jan 29 12:25:57 crc kubenswrapper[4852]: I0129 12:25:57.475373 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19f8bd0d-d739-4d6e-a48a-e3cbb0f64229" path="/var/lib/kubelet/pods/19f8bd0d-d739-4d6e-a48a-e3cbb0f64229/volumes" Jan 29 12:25:57 crc kubenswrapper[4852]: I0129 12:25:57.680290 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" path="/var/lib/kubelet/pods/dbaa621d-6bac-4dbc-a5ea-fec780103dde/volumes" Jan 29 12:25:58 crc kubenswrapper[4852]: I0129 12:25:58.053891 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-rvw7r"] Jan 29 12:25:58 crc kubenswrapper[4852]: I0129 12:25:58.064539 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-rvw7r"] Jan 29 12:25:59 crc kubenswrapper[4852]: I0129 12:25:59.276866 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:59 crc kubenswrapper[4852]: I0129 12:25:59.332822 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:25:59 crc kubenswrapper[4852]: I0129 12:25:59.484767 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3355180f-9d97-4440-aa5c-5319273300f7" path="/var/lib/kubelet/pods/3355180f-9d97-4440-aa5c-5319273300f7/volumes" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.509486 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n5tlh"] Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.510238 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-n5tlh" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" containerID="cri-o://1f4cebec076485946127332ed10da33e7ab3dedc03606ef2361c38dfb8e29246" gracePeriod=2 Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.942628 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7"] Jan 29 12:26:01 crc kubenswrapper[4852]: E0129 12:26:01.944982 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon-log" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945011 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon-log" Jan 29 12:26:01 crc kubenswrapper[4852]: E0129 12:26:01.945028 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="pull" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945038 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="pull" Jan 29 12:26:01 crc kubenswrapper[4852]: E0129 12:26:01.945052 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="extract" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945060 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="extract" Jan 29 12:26:01 crc kubenswrapper[4852]: E0129 12:26:01.945086 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945094 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" Jan 29 12:26:01 crc kubenswrapper[4852]: E0129 12:26:01.945119 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="util" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945126 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="util" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945402 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945436 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bc8818d-aaba-4852-8a76-7933da07170d" containerName="extract" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.945456 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbaa621d-6bac-4dbc-a5ea-fec780103dde" containerName="horizon-log" Jan 29 12:26:01 crc kubenswrapper[4852]: 
I0129 12:26:01.946405 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.955215 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-c8z5q" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.955548 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.957004 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 29 12:26:01 crc kubenswrapper[4852]: I0129 12:26:01.967485 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.024308 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkztg\" (UniqueName: \"kubernetes.io/projected/a9853c72-f7e7-4ece-b626-e7d5a6fdbef1-kube-api-access-pkztg\") pod \"obo-prometheus-operator-68bc856cb9-wxsg7\" (UID: \"a9853c72-f7e7-4ece-b626-e7d5a6fdbef1\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.069412 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.070854 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.073076 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.073413 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-7c5vn" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.082719 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.095469 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.096870 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.114801 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.126340 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkztg\" (UniqueName: \"kubernetes.io/projected/a9853c72-f7e7-4ece-b626-e7d5a6fdbef1-kube-api-access-pkztg\") pod \"obo-prometheus-operator-68bc856cb9-wxsg7\" (UID: \"a9853c72-f7e7-4ece-b626-e7d5a6fdbef1\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.187624 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkztg\" (UniqueName: \"kubernetes.io/projected/a9853c72-f7e7-4ece-b626-e7d5a6fdbef1-kube-api-access-pkztg\") pod \"obo-prometheus-operator-68bc856cb9-wxsg7\" (UID: \"a9853c72-f7e7-4ece-b626-e7d5a6fdbef1\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.230369 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a34c5dcb-da69-43ae-9e4b-42a942b3cf40-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-z4bpj\" (UID: \"a34c5dcb-da69-43ae-9e4b-42a942b3cf40\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.230449 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a34c5dcb-da69-43ae-9e4b-42a942b3cf40-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-z4bpj\" (UID: \"a34c5dcb-da69-43ae-9e4b-42a942b3cf40\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.230528 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d72564b9-92ba-48b1-ac14-3f7d0c257191-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-pjngj\" (UID: \"d72564b9-92ba-48b1-ac14-3f7d0c257191\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.230670 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d72564b9-92ba-48b1-ac14-3f7d0c257191-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-pjngj\" (UID: \"d72564b9-92ba-48b1-ac14-3f7d0c257191\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.276123 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-d85kb"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.277638 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.284105 4852 generic.go:334] "Generic (PLEG): container finished" podID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerID="1f4cebec076485946127332ed10da33e7ab3dedc03606ef2361c38dfb8e29246" exitCode=0 Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.284156 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerDied","Data":"1f4cebec076485946127332ed10da33e7ab3dedc03606ef2361c38dfb8e29246"} Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.284188 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5tlh" event={"ID":"217bcf06-4848-41c0-9a71-6cf0fdf5e554","Type":"ContainerDied","Data":"9112d626f9d34669ef8f905b99df8f8263f8d9733c8c8876f98ca171e1d0ce09"} Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.284200 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9112d626f9d34669ef8f905b99df8f8263f8d9733c8c8876f98ca171e1d0ce09" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.284224 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-kcqxq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.284335 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.299054 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.303293 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-d85kb"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.347306 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.372250 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a34c5dcb-da69-43ae-9e4b-42a942b3cf40-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-z4bpj\" (UID: \"a34c5dcb-da69-43ae-9e4b-42a942b3cf40\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.372341 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a34c5dcb-da69-43ae-9e4b-42a942b3cf40-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-z4bpj\" (UID: \"a34c5dcb-da69-43ae-9e4b-42a942b3cf40\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.372390 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4cvx\" (UniqueName: \"kubernetes.io/projected/4418d202-0abd-48f2-8216-49462f1f5e1f-kube-api-access-t4cvx\") pod \"observability-operator-59bdc8b94-d85kb\" (UID: \"4418d202-0abd-48f2-8216-49462f1f5e1f\") " pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.372475 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d72564b9-92ba-48b1-ac14-3f7d0c257191-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-pjngj\" (UID: \"d72564b9-92ba-48b1-ac14-3f7d0c257191\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.372535 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4418d202-0abd-48f2-8216-49462f1f5e1f-observability-operator-tls\") pod \"observability-operator-59bdc8b94-d85kb\" (UID: \"4418d202-0abd-48f2-8216-49462f1f5e1f\") " pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.372707 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d72564b9-92ba-48b1-ac14-3f7d0c257191-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-pjngj\" (UID: \"d72564b9-92ba-48b1-ac14-3f7d0c257191\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.386513 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d72564b9-92ba-48b1-ac14-3f7d0c257191-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-pjngj\" (UID: \"d72564b9-92ba-48b1-ac14-3f7d0c257191\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.389435 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a34c5dcb-da69-43ae-9e4b-42a942b3cf40-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-z4bpj\" (UID: 
\"a34c5dcb-da69-43ae-9e4b-42a942b3cf40\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.389865 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d72564b9-92ba-48b1-ac14-3f7d0c257191-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-pjngj\" (UID: \"d72564b9-92ba-48b1-ac14-3f7d0c257191\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.407574 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a34c5dcb-da69-43ae-9e4b-42a942b3cf40-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6448976d6-z4bpj\" (UID: \"a34c5dcb-da69-43ae-9e4b-42a942b3cf40\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.433713 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.474826 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-utilities\") pod \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.474884 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fvjd\" (UniqueName: \"kubernetes.io/projected/217bcf06-4848-41c0-9a71-6cf0fdf5e554-kube-api-access-7fvjd\") pod \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.475034 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-catalog-content\") pod \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\" (UID: \"217bcf06-4848-41c0-9a71-6cf0fdf5e554\") " Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.475362 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4cvx\" (UniqueName: \"kubernetes.io/projected/4418d202-0abd-48f2-8216-49462f1f5e1f-kube-api-access-t4cvx\") pod \"observability-operator-59bdc8b94-d85kb\" (UID: \"4418d202-0abd-48f2-8216-49462f1f5e1f\") " pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.475516 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4418d202-0abd-48f2-8216-49462f1f5e1f-observability-operator-tls\") pod \"observability-operator-59bdc8b94-d85kb\" (UID: \"4418d202-0abd-48f2-8216-49462f1f5e1f\") " pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.477679 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-utilities" (OuterVolumeSpecName: "utilities") pod "217bcf06-4848-41c0-9a71-6cf0fdf5e554" (UID: "217bcf06-4848-41c0-9a71-6cf0fdf5e554"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.480000 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.492356 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4418d202-0abd-48f2-8216-49462f1f5e1f-observability-operator-tls\") pod \"observability-operator-59bdc8b94-d85kb\" (UID: \"4418d202-0abd-48f2-8216-49462f1f5e1f\") " pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.495613 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4cvx\" (UniqueName: \"kubernetes.io/projected/4418d202-0abd-48f2-8216-49462f1f5e1f-kube-api-access-t4cvx\") pod \"observability-operator-59bdc8b94-d85kb\" (UID: \"4418d202-0abd-48f2-8216-49462f1f5e1f\") " pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.497029 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-2p5pq"] Jan 29 12:26:02 crc kubenswrapper[4852]: E0129 12:26:02.497474 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.497491 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" Jan 29 12:26:02 crc kubenswrapper[4852]: E0129 12:26:02.497520 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="extract-utilities" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.497526 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="extract-utilities" Jan 29 12:26:02 crc kubenswrapper[4852]: E0129 12:26:02.497542 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="extract-content" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.497548 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="extract-content" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.497770 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" containerName="registry-server" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.498652 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.501396 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-zjf96" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.512351 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-2p5pq"] Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.524004 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/217bcf06-4848-41c0-9a71-6cf0fdf5e554-kube-api-access-7fvjd" (OuterVolumeSpecName: "kube-api-access-7fvjd") pod "217bcf06-4848-41c0-9a71-6cf0fdf5e554" (UID: "217bcf06-4848-41c0-9a71-6cf0fdf5e554"). InnerVolumeSpecName "kube-api-access-7fvjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.582066 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wmjq\" (UniqueName: \"kubernetes.io/projected/db57a9e8-3292-4515-a24d-244418fc98ba-kube-api-access-7wmjq\") pod \"perses-operator-5bf474d74f-2p5pq\" (UID: \"db57a9e8-3292-4515-a24d-244418fc98ba\") " pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.582760 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/db57a9e8-3292-4515-a24d-244418fc98ba-openshift-service-ca\") pod \"perses-operator-5bf474d74f-2p5pq\" (UID: \"db57a9e8-3292-4515-a24d-244418fc98ba\") " pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.582996 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fvjd\" (UniqueName: \"kubernetes.io/projected/217bcf06-4848-41c0-9a71-6cf0fdf5e554-kube-api-access-7fvjd\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.641136 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.650020 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "217bcf06-4848-41c0-9a71-6cf0fdf5e554" (UID: "217bcf06-4848-41c0-9a71-6cf0fdf5e554"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.685783 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wmjq\" (UniqueName: \"kubernetes.io/projected/db57a9e8-3292-4515-a24d-244418fc98ba-kube-api-access-7wmjq\") pod \"perses-operator-5bf474d74f-2p5pq\" (UID: \"db57a9e8-3292-4515-a24d-244418fc98ba\") " pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.686108 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/db57a9e8-3292-4515-a24d-244418fc98ba-openshift-service-ca\") pod \"perses-operator-5bf474d74f-2p5pq\" (UID: \"db57a9e8-3292-4515-a24d-244418fc98ba\") " pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.686308 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/217bcf06-4848-41c0-9a71-6cf0fdf5e554-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.687460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/db57a9e8-3292-4515-a24d-244418fc98ba-openshift-service-ca\") pod \"perses-operator-5bf474d74f-2p5pq\" (UID: \"db57a9e8-3292-4515-a24d-244418fc98ba\") " pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.703752 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.726527 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wmjq\" (UniqueName: \"kubernetes.io/projected/db57a9e8-3292-4515-a24d-244418fc98ba-kube-api-access-7wmjq\") pod \"perses-operator-5bf474d74f-2p5pq\" (UID: \"db57a9e8-3292-4515-a24d-244418fc98ba\") " pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:02 crc kubenswrapper[4852]: I0129 12:26:02.857320 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.185925 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7"] Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.305865 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj"] Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.316231 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-n5tlh" Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.318553 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" event={"ID":"a9853c72-f7e7-4ece-b626-e7d5a6fdbef1","Type":"ContainerStarted","Data":"655faafa181e13fc72e4cbf0e5a61099103ae98d0860389900613d2392aa302d"} Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.384424 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n5tlh"] Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.410567 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-n5tlh"] Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.476878 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="217bcf06-4848-41c0-9a71-6cf0fdf5e554" path="/var/lib/kubelet/pods/217bcf06-4848-41c0-9a71-6cf0fdf5e554/volumes" Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.562309 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-2p5pq"] Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.637153 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj"] Jan 29 12:26:03 crc kubenswrapper[4852]: I0129 12:26:03.772698 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-d85kb"] Jan 29 12:26:03 crc kubenswrapper[4852]: W0129 12:26:03.774088 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4418d202_0abd_48f2_8216_49462f1f5e1f.slice/crio-dffa0e6fe18070ad3eeade80060cd395d2a91aa44bd03912e7e31b38b468f414 WatchSource:0}: Error finding container dffa0e6fe18070ad3eeade80060cd395d2a91aa44bd03912e7e31b38b468f414: Status 404 returned error can't find the container with id dffa0e6fe18070ad3eeade80060cd395d2a91aa44bd03912e7e31b38b468f414 Jan 29 12:26:04 crc kubenswrapper[4852]: I0129 12:26:04.329341 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" event={"ID":"d72564b9-92ba-48b1-ac14-3f7d0c257191","Type":"ContainerStarted","Data":"52ce7ede412d3a3305161faa3252f871e57d728ef2c6ca9cbba52ecc6ceb1563"} Jan 29 12:26:04 crc kubenswrapper[4852]: I0129 12:26:04.331836 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" event={"ID":"a34c5dcb-da69-43ae-9e4b-42a942b3cf40","Type":"ContainerStarted","Data":"a824fda979ed9846f6cca56ffe4112de0cc891d51c983165e56f019a87f8d7ac"} Jan 29 12:26:04 crc kubenswrapper[4852]: I0129 12:26:04.335262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" event={"ID":"4418d202-0abd-48f2-8216-49462f1f5e1f","Type":"ContainerStarted","Data":"dffa0e6fe18070ad3eeade80060cd395d2a91aa44bd03912e7e31b38b468f414"} Jan 29 12:26:04 crc kubenswrapper[4852]: I0129 12:26:04.338981 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" event={"ID":"db57a9e8-3292-4515-a24d-244418fc98ba","Type":"ContainerStarted","Data":"aea9da7e29ad7e6b36629d43d7335988bb61c8c728af71b307af40d3961fc197"} Jan 29 12:26:16 crc kubenswrapper[4852]: I0129 12:26:16.037785 4852 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-47mtt"] Jan 29 12:26:16 crc kubenswrapper[4852]: I0129 12:26:16.050716 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-47mtt"] Jan 29 12:26:17 crc kubenswrapper[4852]: I0129 12:26:17.477106 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bf6245c-9a72-4da9-a118-ba98c6495cdc" path="/var/lib/kubelet/pods/0bf6245c-9a72-4da9-a118-ba98c6495cdc/volumes" Jan 29 12:26:19 crc kubenswrapper[4852]: E0129 12:26:19.654735 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a" Jan 29 12:26:19 crc kubenswrapper[4852]: E0129 12:26:19.655290 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a,Command:[],Args:[--prometheus-config-reloader=$(RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER) --prometheus-instance-selector=app.kubernetes.io/managed-by=observability-operator --alertmanager-instance-selector=app.kubernetes.io/managed-by=observability-operator --thanos-ruler-instance-selector=app.kubernetes.io/managed-by=observability-operator --watch-referenced-objects-in-all-namespaces=true --disable-unmanaged-prometheus-configuration=true],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOGC,Value:30,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER,Value:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{157286400 0} {} 150Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pkztg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-68bc856cb9-wxsg7_openshift-operators(a9853c72-f7e7-4ece-b626-e7d5a6fdbef1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" 
Jan 29 12:26:19 crc kubenswrapper[4852]: E0129 12:26:19.663180 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" podUID="a9853c72-f7e7-4ece-b626-e7d5a6fdbef1" Jan 29 12:26:20 crc kubenswrapper[4852]: E0129 12:26:20.534451 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a\\\"\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" podUID="a9853c72-f7e7-4ece-b626-e7d5a6fdbef1" Jan 29 12:26:21 crc kubenswrapper[4852]: E0129 12:26:21.698795 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea" Jan 29 12:26:21 crc kubenswrapper[4852]: E0129 12:26:21.699189 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-6448976d6-pjngj_openshift-operators(d72564b9-92ba-48b1-ac14-3f7d0c257191): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: 
copying config: context canceled" logger="UnhandledError" Jan 29 12:26:21 crc kubenswrapper[4852]: E0129 12:26:21.700799 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" podUID="d72564b9-92ba-48b1-ac14-3f7d0c257191" Jan 29 12:26:22 crc kubenswrapper[4852]: E0129 12:26:22.555504 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" podUID="d72564b9-92ba-48b1-ac14-3f7d0c257191" Jan 29 12:26:22 crc kubenswrapper[4852]: E0129 12:26:22.923040 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:b5c8526d2ae660fe092dd8a7acf18ec4957d5c265890a222f55396fc2cdaeed8" Jan 29 12:26:22 crc kubenswrapper[4852]: E0129 12:26:22.923270 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:perses-operator,Image:registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:b5c8526d2ae660fe092dd8a7acf18ec4957d5c265890a222f55396fc2cdaeed8,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openshift-service-ca,ReadOnly:true,MountPath:/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7wmjq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod perses-operator-5bf474d74f-2p5pq_openshift-operators(db57a9e8-3292-4515-a24d-244418fc98ba): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:26:22 crc kubenswrapper[4852]: E0129 12:26:22.924771 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" podUID="db57a9e8-3292-4515-a24d-244418fc98ba" Jan 29 12:26:23 crc kubenswrapper[4852]: E0129 12:26:23.202162 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea" Jan 29 12:26:23 crc kubenswrapper[4852]: E0129 12:26:23.202330 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-6448976d6-z4bpj_openshift-operators(a34c5dcb-da69-43ae-9e4b-42a942b3cf40): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:26:23 crc kubenswrapper[4852]: E0129 12:26:23.203650 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" podUID="a34c5dcb-da69-43ae-9e4b-42a942b3cf40" Jan 29 12:26:23 crc kubenswrapper[4852]: E0129 12:26:23.563963 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" podUID="a34c5dcb-da69-43ae-9e4b-42a942b3cf40" Jan 29 12:26:23 crc kubenswrapper[4852]: E0129 12:26:23.565374 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:b5c8526d2ae660fe092dd8a7acf18ec4957d5c265890a222f55396fc2cdaeed8\\\"\"" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" podUID="db57a9e8-3292-4515-a24d-244418fc98ba" Jan 29 12:26:26 crc kubenswrapper[4852]: E0129 12:26:26.033567 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:2ecf763b02048d2cf4c17967a7b2cacc7afd6af0e963a39579d876f8f4170e3c" Jan 29 12:26:26 crc kubenswrapper[4852]: E0129 12:26:26.034535 4852 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:2ecf763b02048d2cf4c17967a7b2cacc7afd6af0e963a39579d876f8f4170e3c,Command:[],Args:[--namespace=$(NAMESPACE) --images=perses=$(RELATED_IMAGE_PERSES) --images=alertmanager=$(RELATED_IMAGE_ALERTMANAGER) --images=prometheus=$(RELATED_IMAGE_PROMETHEUS) --images=thanos=$(RELATED_IMAGE_THANOS) --images=ui-dashboards=$(RELATED_IMAGE_CONSOLE_DASHBOARDS_PLUGIN) --images=ui-distributed-tracing=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN) --images=ui-distributed-tracing-pf5=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF5) --images=ui-distributed-tracing-pf4=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF4) --images=ui-logging=$(RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN) --images=ui-logging-pf4=$(RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN_PF4) --images=ui-troubleshooting-panel=$(RELATED_IMAGE_CONSOLE_TROUBLESHOOTING_PANEL_PLUGIN) --images=ui-monitoring=$(RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN) --images=ui-monitoring-pf5=$(RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN_PF5) --images=korrel8r=$(RELATED_IMAGE_KORREL8R) --images=health-analyzer=$(RELATED_IMAGE_CLUSTER_HEALTH_ANALYZER) --openshift.enabled=true],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:RELATED_IMAGE_ALERTMANAGER,Value:registry.redhat.io/cluster-observability-operator/alertmanager-rhel9@sha256:dc62889b883f597de91b5389cc52c84c607247d49a807693be2f688e4703dfc3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS,Value:registry.redhat.io/cluster-observability-operator/prometheus-rhel9@sha256:1b555e21bba7c609111ace4380382a696d9aceeb6e9816bf9023b8f689b6c741,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_THANOS,Value:registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:a223bab813b82d698992490bbb60927f6288a83ba52d539836c250e1471f6d34,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PERSES,Value:registry.redhat.io/cluster-observability-operator/perses-rhel9@sha256:e797cdb47beef40b04da7b6d645bca3dc32e6247003c45b56b38efd9e13bf01c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DASHBOARDS_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:093d2731ac848ed5fd57356b155a19d3bf7b8db96d95b09c5d0095e143f7254f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-rhel9@sha256:7d662a120305e2528acc7e9142b770b5b6a7f4932ddfcadfa4ac953935124895,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF5,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-pf5-rhel9@sha256:75465aabb0aa427a5c531a8fcde463f6d119afbcc618ebcbf6b7ee9bc8aad160,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF4,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-pf4-rhel9@sha256:dc18c8d6a4a9a0a574a57cc5082c8a9b26023bd6d69b9732892d584c1dfe5070,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/logging-console-plugin-rhel9@sha256:369729978cecdc13c99ef3d179f8eb8a450a4a0cb70b63c27a55a15d1710ba27,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN_PF4,Value:registry.redhat.io/cluster-observab
ility-operator/logging-console-plugin-pf4-rhel9@sha256:d8c7a61d147f62b204d5c5f16864386025393453c9a81ea327bbd25d7765d611,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_TROUBLESHOOTING_PANEL_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/troubleshooting-panel-console-plugin-rhel9@sha256:b4a6eb1cc118a4334b424614959d8b7f361ddd779b3a72690ca49b0a3f26d9b8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-rhel9@sha256:21d4fff670893ba4b7fbc528cd49f8b71c8281cede9ef84f0697065bb6a7fc50,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN_PF5,Value:registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-pf5-rhel9@sha256:12d9dbe297a1c3b9df671f21156992082bc483887d851fafe76e5d17321ff474,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KORREL8R,Value:registry.redhat.io/cluster-observability-operator/korrel8r-rhel9@sha256:e65c37f04f6d76a0cbfe05edb3cddf6a8f14f859ee35cf3aebea8fcb991d2c19,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLUSTER_HEALTH_ANALYZER,Value:registry.redhat.io/cluster-observability-operator/cluster-health-analyzer-rhel9@sha256:48e4e178c6eeaa9d5dd77a591c185a311b4b4a5caadb7199d48463123e31dc9e,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{400 -3} {} 400m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:observability-operator-tls,ReadOnly:true,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t4cvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod observability-operator-59bdc8b94-d85kb_openshift-operators(4418d202-0abd-48f2-8216-49462f1f5e1f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:26:26 crc kubenswrapper[4852]: E0129 12:26:26.035850 4852 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" podUID="4418d202-0abd-48f2-8216-49462f1f5e1f" Jan 29 12:26:26 crc kubenswrapper[4852]: E0129 12:26:26.593378 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:2ecf763b02048d2cf4c17967a7b2cacc7afd6af0e963a39579d876f8f4170e3c\\\"\"" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" podUID="4418d202-0abd-48f2-8216-49462f1f5e1f" Jan 29 12:26:30 crc kubenswrapper[4852]: I0129 12:26:30.017611 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:26:30 crc kubenswrapper[4852]: I0129 12:26:30.018320 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.706096 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" event={"ID":"db57a9e8-3292-4515-a24d-244418fc98ba","Type":"ContainerStarted","Data":"ab17c9b3efcca8c48853228f76c3621563f19c05962fd9e1d96ba1a9a1059878"} Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.706784 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.708549 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" event={"ID":"a9853c72-f7e7-4ece-b626-e7d5a6fdbef1","Type":"ContainerStarted","Data":"ad2edc04d814de562e4aa2dc1ef864a7405e727217b8e313addc874deab33c9e"} Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.710825 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" event={"ID":"d72564b9-92ba-48b1-ac14-3f7d0c257191","Type":"ContainerStarted","Data":"f52aebac86ebeecef5cedacca46d854ae145fe30929d4023d911e1ba3d528663"} Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.713568 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" event={"ID":"a34c5dcb-da69-43ae-9e4b-42a942b3cf40","Type":"ContainerStarted","Data":"d8ab079463d5459a292485542d8dc4acbe5d50987a33a4c79de4c6c12bce37fd"} Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.735329 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" podStartSLOduration=2.418569887 podStartE2EDuration="36.735311232s" podCreationTimestamp="2026-01-29 12:26:02 +0000 UTC" firstStartedPulling="2026-01-29 12:26:03.54522904 +0000 UTC m=+6260.762560174" lastFinishedPulling="2026-01-29 12:26:37.861970375 
+0000 UTC m=+6295.079301519" observedRunningTime="2026-01-29 12:26:38.726158259 +0000 UTC m=+6295.943489393" watchObservedRunningTime="2026-01-29 12:26:38.735311232 +0000 UTC m=+6295.952642366" Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.751911 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-pjngj" podStartSLOduration=2.21405601 podStartE2EDuration="36.751892147s" podCreationTimestamp="2026-01-29 12:26:02 +0000 UTC" firstStartedPulling="2026-01-29 12:26:03.317694891 +0000 UTC m=+6260.535026035" lastFinishedPulling="2026-01-29 12:26:37.855531038 +0000 UTC m=+6295.072862172" observedRunningTime="2026-01-29 12:26:38.750779429 +0000 UTC m=+6295.968110563" watchObservedRunningTime="2026-01-29 12:26:38.751892147 +0000 UTC m=+6295.969223281" Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.782918 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-wxsg7" podStartSLOduration=3.11854182 podStartE2EDuration="37.782900933s" podCreationTimestamp="2026-01-29 12:26:01 +0000 UTC" firstStartedPulling="2026-01-29 12:26:03.195288516 +0000 UTC m=+6260.412619650" lastFinishedPulling="2026-01-29 12:26:37.859647629 +0000 UTC m=+6295.076978763" observedRunningTime="2026-01-29 12:26:38.777806978 +0000 UTC m=+6295.995138102" watchObservedRunningTime="2026-01-29 12:26:38.782900933 +0000 UTC m=+6296.000232067" Jan 29 12:26:38 crc kubenswrapper[4852]: I0129 12:26:38.821233 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6448976d6-z4bpj" podStartSLOduration=-9223372000.033564 podStartE2EDuration="36.821212456s" podCreationTimestamp="2026-01-29 12:26:02 +0000 UTC" firstStartedPulling="2026-01-29 12:26:03.634936697 +0000 UTC m=+6260.852267831" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:26:38.810997728 +0000 UTC m=+6296.028328862" watchObservedRunningTime="2026-01-29 12:26:38.821212456 +0000 UTC m=+6296.038543590" Jan 29 12:26:43 crc kubenswrapper[4852]: I0129 12:26:43.757858 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" event={"ID":"4418d202-0abd-48f2-8216-49462f1f5e1f","Type":"ContainerStarted","Data":"015b5386b91010c9401e9833c46c58d0c2536042b53a6024b93b6109020e783b"} Jan 29 12:26:43 crc kubenswrapper[4852]: I0129 12:26:43.758942 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:43 crc kubenswrapper[4852]: I0129 12:26:43.760231 4852 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-d85kb container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.1.127:8081/healthz\": dial tcp 10.217.1.127:8081: connect: connection refused" start-of-body= Jan 29 12:26:43 crc kubenswrapper[4852]: I0129 12:26:43.760287 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" podUID="4418d202-0abd-48f2-8216-49462f1f5e1f" containerName="operator" probeResult="failure" output="Get \"http://10.217.1.127:8081/healthz\": dial tcp 10.217.1.127:8081: connect: connection refused" Jan 29 12:26:43 crc kubenswrapper[4852]: I0129 12:26:43.793995 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-operators/observability-operator-59bdc8b94-d85kb" podStartSLOduration=2.2895508700000002 podStartE2EDuration="41.793975161s" podCreationTimestamp="2026-01-29 12:26:02 +0000 UTC" firstStartedPulling="2026-01-29 12:26:03.777832432 +0000 UTC m=+6260.995163566" lastFinishedPulling="2026-01-29 12:26:43.282256713 +0000 UTC m=+6300.499587857" observedRunningTime="2026-01-29 12:26:43.785960195 +0000 UTC m=+6301.003291369" watchObservedRunningTime="2026-01-29 12:26:43.793975161 +0000 UTC m=+6301.011306305" Jan 29 12:26:44 crc kubenswrapper[4852]: I0129 12:26:44.769542 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-d85kb" Jan 29 12:26:52 crc kubenswrapper[4852]: I0129 12:26:52.861137 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-2p5pq" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.715166 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.716050 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" containerName="openstackclient" containerID="cri-o://98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc" gracePeriod=2 Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.729156 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.819153 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 12:26:54 crc kubenswrapper[4852]: E0129 12:26:54.819728 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" containerName="openstackclient" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.819752 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" containerName="openstackclient" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.820023 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" containerName="openstackclient" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.820989 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.890476 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.922466 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" podUID="b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.965289 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-openstack-config-secret\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.965367 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92j4l\" (UniqueName: \"kubernetes.io/projected/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-kube-api-access-92j4l\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:54 crc kubenswrapper[4852]: I0129 12:26:54.965419 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-openstack-config\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.067991 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-openstack-config-secret\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.068090 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92j4l\" (UniqueName: \"kubernetes.io/projected/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-kube-api-access-92j4l\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.068148 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-openstack-config\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.069907 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-openstack-config\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.089183 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-openstack-config-secret\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc 
kubenswrapper[4852]: I0129 12:26:55.130086 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92j4l\" (UniqueName: \"kubernetes.io/projected/b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6-kube-api-access-92j4l\") pod \"openstackclient\" (UID: \"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6\") " pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.133324 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.136066 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.143804 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.144721 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-wrp5q" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.166514 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.272970 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgj65\" (UniqueName: \"kubernetes.io/projected/68096b01-9bf9-4e74-9dc9-3521b1ea4fba-kube-api-access-pgj65\") pod \"kube-state-metrics-0\" (UID: \"68096b01-9bf9-4e74-9dc9-3521b1ea4fba\") " pod="openstack/kube-state-metrics-0" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.376060 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgj65\" (UniqueName: \"kubernetes.io/projected/68096b01-9bf9-4e74-9dc9-3521b1ea4fba-kube-api-access-pgj65\") pod \"kube-state-metrics-0\" (UID: \"68096b01-9bf9-4e74-9dc9-3521b1ea4fba\") " pod="openstack/kube-state-metrics-0" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.424647 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgj65\" (UniqueName: \"kubernetes.io/projected/68096b01-9bf9-4e74-9dc9-3521b1ea4fba-kube-api-access-pgj65\") pod \"kube-state-metrics-0\" (UID: \"68096b01-9bf9-4e74-9dc9-3521b1ea4fba\") " pod="openstack/kube-state-metrics-0" Jan 29 12:26:55 crc kubenswrapper[4852]: I0129 12:26:55.631105 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.005457 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.014964 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.017025 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.021972 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.022240 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.022305 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.035963 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-wrrr6" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.039535 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131215 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8da49752-6061-42b7-b410-7ce385b6a075-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131270 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68xq9\" (UniqueName: \"kubernetes.io/projected/8da49752-6061-42b7-b410-7ce385b6a075-kube-api-access-68xq9\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131292 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131315 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8da49752-6061-42b7-b410-7ce385b6a075-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131364 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/8da49752-6061-42b7-b410-7ce385b6a075-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131409 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-config-volume\") pod 
\"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.131445 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.163135 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.234673 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.235091 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8da49752-6061-42b7-b410-7ce385b6a075-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.235127 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68xq9\" (UniqueName: \"kubernetes.io/projected/8da49752-6061-42b7-b410-7ce385b6a075-kube-api-access-68xq9\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.235146 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.235169 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8da49752-6061-42b7-b410-7ce385b6a075-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.235216 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/8da49752-6061-42b7-b410-7ce385b6a075-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.235262 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.236477 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/8da49752-6061-42b7-b410-7ce385b6a075-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.252534 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8da49752-6061-42b7-b410-7ce385b6a075-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.261545 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.270118 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.273237 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68xq9\" (UniqueName: \"kubernetes.io/projected/8da49752-6061-42b7-b410-7ce385b6a075-kube-api-access-68xq9\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.275244 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/8da49752-6061-42b7-b410-7ce385b6a075-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.291153 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8da49752-6061-42b7-b410-7ce385b6a075-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"8da49752-6061-42b7-b410-7ce385b6a075\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.376360 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.523315 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 12:26:56 crc kubenswrapper[4852]: W0129 12:26:56.537223 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68096b01_9bf9_4e74_9dc9_3521b1ea4fba.slice/crio-7e51952ac9be8078a7d422ad8e85348fdf40b2944bd95205fa0d1a7c7a6e9f0b WatchSource:0}: Error finding container 7e51952ac9be8078a7d422ad8e85348fdf40b2944bd95205fa0d1a7c7a6e9f0b: Status 404 returned error can't find the container with id 7e51952ac9be8078a7d422ad8e85348fdf40b2944bd95205fa0d1a7c7a6e9f0b Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.591318 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.595020 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.611163 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.611719 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.666972 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-6tddk" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.667480 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.667034 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.667138 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.667194 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.699944 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.714638 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.754965 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5c8c\" (UniqueName: \"kubernetes.io/projected/5af19bd0-65c2-4a9b-9129-13c8d890652e-kube-api-access-d5c8c\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755538 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5af19bd0-65c2-4a9b-9129-13c8d890652e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " 
pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755577 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755672 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-config\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755731 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755765 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755793 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5af19bd0-65c2-4a9b-9129-13c8d890652e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755845 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755948 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.755982 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868095 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868222 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868376 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5c8c\" (UniqueName: \"kubernetes.io/projected/5af19bd0-65c2-4a9b-9129-13c8d890652e-kube-api-access-d5c8c\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868449 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5af19bd0-65c2-4a9b-9129-13c8d890652e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868479 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868536 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-config\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868566 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868607 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5af19bd0-65c2-4a9b-9129-13c8d890652e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868635 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" 
Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.868682 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.870860 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.873413 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.874483 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.882349 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5af19bd0-65c2-4a9b-9129-13c8d890652e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.883050 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-config\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.884352 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5af19bd0-65c2-4a9b-9129-13c8d890652e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.886407 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5af19bd0-65c2-4a9b-9129-13c8d890652e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.897020 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5af19bd0-65c2-4a9b-9129-13c8d890652e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " 
pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.908539 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5c8c\" (UniqueName: \"kubernetes.io/projected/5af19bd0-65c2-4a9b-9129-13c8d890652e-kube-api-access-d5c8c\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.915005 4852 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.915060 4852 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0fdc0dd063d54f8d146cbab806a03aed7b7b063622155fff055aae67b61a01ce/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:56 crc kubenswrapper[4852]: I0129 12:26:56.978394 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6","Type":"ContainerStarted","Data":"762f4a85e5ff2a254f3a0d4f26dc90e696ed77340137a063f7a7c5bf49f3041a"} Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:56.999456 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"68096b01-9bf9-4e74-9dc9-3521b1ea4fba","Type":"ContainerStarted","Data":"7e51952ac9be8078a7d422ad8e85348fdf40b2944bd95205fa0d1a7c7a6e9f0b"} Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.458177 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-681ad5e2-437e-491f-95e5-c70daf0ac49a\") pod \"prometheus-metric-storage-0\" (UID: \"5af19bd0-65c2-4a9b-9129-13c8d890652e\") " pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.553378 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.562381 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.750753 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.763521 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config-secret\") pod \"8dc6c019-745a-4ee8-97a2-efd0347d376e\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.763900 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thct5\" (UniqueName: \"kubernetes.io/projected/8dc6c019-745a-4ee8-97a2-efd0347d376e-kube-api-access-thct5\") pod \"8dc6c019-745a-4ee8-97a2-efd0347d376e\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.764197 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config\") pod \"8dc6c019-745a-4ee8-97a2-efd0347d376e\" (UID: \"8dc6c019-745a-4ee8-97a2-efd0347d376e\") " Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.776711 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dc6c019-745a-4ee8-97a2-efd0347d376e-kube-api-access-thct5" (OuterVolumeSpecName: "kube-api-access-thct5") pod "8dc6c019-745a-4ee8-97a2-efd0347d376e" (UID: "8dc6c019-745a-4ee8-97a2-efd0347d376e"). InnerVolumeSpecName "kube-api-access-thct5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.803049 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "8dc6c019-745a-4ee8-97a2-efd0347d376e" (UID: "8dc6c019-745a-4ee8-97a2-efd0347d376e"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.861959 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "8dc6c019-745a-4ee8-97a2-efd0347d376e" (UID: "8dc6c019-745a-4ee8-97a2-efd0347d376e"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.867441 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thct5\" (UniqueName: \"kubernetes.io/projected/8dc6c019-745a-4ee8-97a2-efd0347d376e-kube-api-access-thct5\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.867481 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:57 crc kubenswrapper[4852]: I0129 12:26:57.867493 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8dc6c019-745a-4ee8-97a2-efd0347d376e-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.010525 4852 generic.go:334] "Generic (PLEG): container finished" podID="8dc6c019-745a-4ee8-97a2-efd0347d376e" containerID="98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc" exitCode=137 Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.010573 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.010636 4852 scope.go:117] "RemoveContainer" containerID="98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.013171 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6","Type":"ContainerStarted","Data":"801281745d47d7b6a733fb55154c05cee477b012c052c142cd3ab4a6b75dcec0"} Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.014737 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"8da49752-6061-42b7-b410-7ce385b6a075","Type":"ContainerStarted","Data":"4f27d777524fa8ca1bab922877a4d4664043bd42000f51e7ea640cfe472f2667"} Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.039400 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=4.039377214 podStartE2EDuration="4.039377214s" podCreationTimestamp="2026-01-29 12:26:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:26:58.036606707 +0000 UTC m=+6315.253937851" watchObservedRunningTime="2026-01-29 12:26:58.039377214 +0000 UTC m=+6315.256708358" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.042965 4852 scope.go:117] "RemoveContainer" containerID="98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.044018 4852 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" podUID="b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6" Jan 29 12:26:58 crc kubenswrapper[4852]: E0129 12:26:58.046342 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc\": container with ID starting with 98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc not found: ID does not exist" 
containerID="98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.046373 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc"} err="failed to get container status \"98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc\": rpc error: code = NotFound desc = could not find container \"98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc\": container with ID starting with 98ae2506832bd24092b284d16ea58fe500b9efef67fca894a516395dfaf3d3cc not found: ID does not exist" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.127114 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.277919 4852 scope.go:117] "RemoveContainer" containerID="bfe0cbafb0eca0cfb38f9b372dc9422ca1ff807ed09a20aa75e188722510d602" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.401104 4852 scope.go:117] "RemoveContainer" containerID="9c3c5e0d3579881f28eee44c35473d0870f02263cdc8b8b11aab1c48e7619d5e" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.651613 4852 scope.go:117] "RemoveContainer" containerID="c1df753753733380932cd04165424ba9439dbded91803d63dca62063917f48a8" Jan 29 12:26:58 crc kubenswrapper[4852]: I0129 12:26:58.770319 4852 scope.go:117] "RemoveContainer" containerID="cccacb92aaa13713e9afaaf3151b67a846e605d3abdb81ff82a6f4098d722da6" Jan 29 12:26:59 crc kubenswrapper[4852]: I0129 12:26:59.028534 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5af19bd0-65c2-4a9b-9129-13c8d890652e","Type":"ContainerStarted","Data":"fad1ac1287adcff67d2204e7fc40b262fe0a26154edba31aa3b81eedb63d7bcb"} Jan 29 12:26:59 crc kubenswrapper[4852]: I0129 12:26:59.476185 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dc6c019-745a-4ee8-97a2-efd0347d376e" path="/var/lib/kubelet/pods/8dc6c019-745a-4ee8-97a2-efd0347d376e/volumes" Jan 29 12:27:00 crc kubenswrapper[4852]: I0129 12:27:00.017160 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:27:00 crc kubenswrapper[4852]: I0129 12:27:00.017706 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:27:00 crc kubenswrapper[4852]: I0129 12:27:00.045644 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"68096b01-9bf9-4e74-9dc9-3521b1ea4fba","Type":"ContainerStarted","Data":"1b0f7945bcb4412d4250e54e0dd1e668349026b21013b41a92bb1bc281f2a192"} Jan 29 12:27:00 crc kubenswrapper[4852]: I0129 12:27:00.045792 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 12:27:00 crc kubenswrapper[4852]: I0129 12:27:00.071758 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.02422491 
podStartE2EDuration="5.071736855s" podCreationTimestamp="2026-01-29 12:26:55 +0000 UTC" firstStartedPulling="2026-01-29 12:26:56.569795698 +0000 UTC m=+6313.787126832" lastFinishedPulling="2026-01-29 12:26:59.617307643 +0000 UTC m=+6316.834638777" observedRunningTime="2026-01-29 12:27:00.064799236 +0000 UTC m=+6317.282130370" watchObservedRunningTime="2026-01-29 12:27:00.071736855 +0000 UTC m=+6317.289067989" Jan 29 12:27:05 crc kubenswrapper[4852]: I0129 12:27:05.636766 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.045811 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-hw4nm"] Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.057373 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-4c4b-account-create-update-g5kfb"] Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.067814 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-4c4b-account-create-update-g5kfb"] Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.079206 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-hw4nm"] Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.153801 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"8da49752-6061-42b7-b410-7ce385b6a075","Type":"ContainerStarted","Data":"93332ea24488d8ffb52f6b4b07cf34ac09b3ce756ff75311855b24823d4c5473"} Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.156192 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5af19bd0-65c2-4a9b-9129-13c8d890652e","Type":"ContainerStarted","Data":"788148f328a97815549d12b8601d019de6b0718fcc5ca1b4b4f31105a3e1faf7"} Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.474019 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="225c5c60-ced3-48b4-88dc-23d98191127a" path="/var/lib/kubelet/pods/225c5c60-ced3-48b4-88dc-23d98191127a/volumes" Jan 29 12:27:09 crc kubenswrapper[4852]: I0129 12:27:09.627375 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d35e0fe5-a6f0-46c3-8259-e1d777d01d1e" path="/var/lib/kubelet/pods/d35e0fe5-a6f0-46c3-8259-e1d777d01d1e/volumes" Jan 29 12:27:15 crc kubenswrapper[4852]: I0129 12:27:15.229190 4852 generic.go:334] "Generic (PLEG): container finished" podID="8da49752-6061-42b7-b410-7ce385b6a075" containerID="93332ea24488d8ffb52f6b4b07cf34ac09b3ce756ff75311855b24823d4c5473" exitCode=0 Jan 29 12:27:15 crc kubenswrapper[4852]: I0129 12:27:15.229311 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"8da49752-6061-42b7-b410-7ce385b6a075","Type":"ContainerDied","Data":"93332ea24488d8ffb52f6b4b07cf34ac09b3ce756ff75311855b24823d4c5473"} Jan 29 12:27:16 crc kubenswrapper[4852]: I0129 12:27:16.240107 4852 generic.go:334] "Generic (PLEG): container finished" podID="5af19bd0-65c2-4a9b-9129-13c8d890652e" containerID="788148f328a97815549d12b8601d019de6b0718fcc5ca1b4b4f31105a3e1faf7" exitCode=0 Jan 29 12:27:16 crc kubenswrapper[4852]: I0129 12:27:16.240368 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5af19bd0-65c2-4a9b-9129-13c8d890652e","Type":"ContainerDied","Data":"788148f328a97815549d12b8601d019de6b0718fcc5ca1b4b4f31105a3e1faf7"} Jan 29 12:27:19 crc 
kubenswrapper[4852]: I0129 12:27:19.035369 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-9glwg"] Jan 29 12:27:19 crc kubenswrapper[4852]: I0129 12:27:19.049404 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-9glwg"] Jan 29 12:27:19 crc kubenswrapper[4852]: I0129 12:27:19.302864 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"8da49752-6061-42b7-b410-7ce385b6a075","Type":"ContainerStarted","Data":"5ef7c22904254bab7e1b905520b70e93c575a0e66af0a503052c7a3482c1b7fe"} Jan 29 12:27:19 crc kubenswrapper[4852]: I0129 12:27:19.486813 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c8af433-a656-4d8d-8c42-65ee5672e549" path="/var/lib/kubelet/pods/9c8af433-a656-4d8d-8c42-65ee5672e549/volumes" Jan 29 12:27:23 crc kubenswrapper[4852]: I0129 12:27:23.352545 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"8da49752-6061-42b7-b410-7ce385b6a075","Type":"ContainerStarted","Data":"758d2fc496ae43bdf5bca140266b8ea087f4aca792472959c4a8d01a6f896b08"} Jan 29 12:27:23 crc kubenswrapper[4852]: I0129 12:27:23.353215 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Jan 29 12:27:23 crc kubenswrapper[4852]: I0129 12:27:23.355027 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Jan 29 12:27:23 crc kubenswrapper[4852]: I0129 12:27:23.383683 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=7.266516676 podStartE2EDuration="28.383662119s" podCreationTimestamp="2026-01-29 12:26:55 +0000 UTC" firstStartedPulling="2026-01-29 12:26:57.535093017 +0000 UTC m=+6314.752424151" lastFinishedPulling="2026-01-29 12:27:18.65223846 +0000 UTC m=+6335.869569594" observedRunningTime="2026-01-29 12:27:23.37510625 +0000 UTC m=+6340.592437404" watchObservedRunningTime="2026-01-29 12:27:23.383662119 +0000 UTC m=+6340.600993253" Jan 29 12:27:27 crc kubenswrapper[4852]: I0129 12:27:27.388734 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5af19bd0-65c2-4a9b-9129-13c8d890652e","Type":"ContainerStarted","Data":"26e4d3b8334a746eb68c758f0c16e2b1730785af1682c5b5d6cd12fff2d6bae3"} Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.016662 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.017264 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.017323 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.018238 4852 kuberuntime_manager.go:1027] "Message for Container of 
pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.018390 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" gracePeriod=600 Jan 29 12:27:30 crc kubenswrapper[4852]: E0129 12:27:30.224556 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.420696 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" exitCode=0 Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.420740 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751"} Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.420829 4852 scope.go:117] "RemoveContainer" containerID="e89b72b5258ba940c66e1c527c76cdcb417906e73db974c3fcb99b3d86470719" Jan 29 12:27:30 crc kubenswrapper[4852]: I0129 12:27:30.421498 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:27:30 crc kubenswrapper[4852]: E0129 12:27:30.421781 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:27:31 crc kubenswrapper[4852]: I0129 12:27:31.432785 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5af19bd0-65c2-4a9b-9129-13c8d890652e","Type":"ContainerStarted","Data":"51a744d72ea5207e67e988358d71cd425e3fee558645eca7bdbe621069146721"} Jan 29 12:27:40 crc kubenswrapper[4852]: I0129 12:27:40.556871 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"5af19bd0-65c2-4a9b-9129-13c8d890652e","Type":"ContainerStarted","Data":"8f81bfb37ea8fd22337112f7f2d7122fe773694a4f82f313ae943d60ec374082"} Jan 29 12:27:41 crc kubenswrapper[4852]: I0129 12:27:41.623481 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.6456340449999995 podStartE2EDuration="46.623451019s" podCreationTimestamp="2026-01-29 
12:26:55 +0000 UTC" firstStartedPulling="2026-01-29 12:26:58.136857942 +0000 UTC m=+6315.354189076" lastFinishedPulling="2026-01-29 12:27:40.114674906 +0000 UTC m=+6357.332006050" observedRunningTime="2026-01-29 12:27:41.617554304 +0000 UTC m=+6358.834885478" watchObservedRunningTime="2026-01-29 12:27:41.623451019 +0000 UTC m=+6358.840782203" Jan 29 12:27:42 crc kubenswrapper[4852]: I0129 12:27:42.465028 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:27:42 crc kubenswrapper[4852]: E0129 12:27:42.465274 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:27:42 crc kubenswrapper[4852]: I0129 12:27:42.563426 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 29 12:27:42 crc kubenswrapper[4852]: I0129 12:27:42.563718 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 29 12:27:42 crc kubenswrapper[4852]: I0129 12:27:42.565834 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 29 12:27:42 crc kubenswrapper[4852]: I0129 12:27:42.591217 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.381940 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.386880 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.390478 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.390806 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.409745 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.532949 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-scripts\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.533008 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-log-httpd\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.533074 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-run-httpd\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.533097 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-config-data\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.533120 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.533134 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.533170 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7shz\" (UniqueName: \"kubernetes.io/projected/d0f1b92d-6285-478d-98c1-278aa2426e5f-kube-api-access-t7shz\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.634734 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 
12:27:45.635137 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.635274 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7shz\" (UniqueName: \"kubernetes.io/projected/d0f1b92d-6285-478d-98c1-278aa2426e5f-kube-api-access-t7shz\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.635480 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-scripts\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.635608 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-log-httpd\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.635770 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-run-httpd\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.635856 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-config-data\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.636793 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-log-httpd\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.637067 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-run-httpd\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.648172 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.648479 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-config-data\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.649276 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.650025 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-scripts\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.653229 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7shz\" (UniqueName: \"kubernetes.io/projected/d0f1b92d-6285-478d-98c1-278aa2426e5f-kube-api-access-t7shz\") pod \"ceilometer-0\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " pod="openstack/ceilometer-0" Jan 29 12:27:45 crc kubenswrapper[4852]: I0129 12:27:45.706255 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:27:46 crc kubenswrapper[4852]: I0129 12:27:46.306845 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:27:46 crc kubenswrapper[4852]: W0129 12:27:46.314698 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0f1b92d_6285_478d_98c1_278aa2426e5f.slice/crio-94369b58116b48b6a33a3b92b0d33280a93efa8fe377abb35abd7fe9be23f349 WatchSource:0}: Error finding container 94369b58116b48b6a33a3b92b0d33280a93efa8fe377abb35abd7fe9be23f349: Status 404 returned error can't find the container with id 94369b58116b48b6a33a3b92b0d33280a93efa8fe377abb35abd7fe9be23f349 Jan 29 12:27:46 crc kubenswrapper[4852]: I0129 12:27:46.626335 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerStarted","Data":"94369b58116b48b6a33a3b92b0d33280a93efa8fe377abb35abd7fe9be23f349"} Jan 29 12:27:49 crc kubenswrapper[4852]: I0129 12:27:49.654599 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerStarted","Data":"1308a8fd57b60dc1d2f2803527086e087e44c6783c97ee24a9c1e67d3b68ed56"} Jan 29 12:27:50 crc kubenswrapper[4852]: I0129 12:27:50.674140 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerStarted","Data":"4db658a9d5c00870a89924fd37a8139963c2d0a043beea10f133391be09b9a33"} Jan 29 12:27:53 crc kubenswrapper[4852]: I0129 12:27:53.704013 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerStarted","Data":"01f921df2578275927d80bec6132a65055bb1a55f67db373754a9206b7b18a9f"} Jan 29 12:27:54 crc kubenswrapper[4852]: I0129 12:27:54.463511 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:27:54 crc kubenswrapper[4852]: E0129 12:27:54.463741 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:27:56 crc kubenswrapper[4852]: I0129 12:27:56.740611 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerStarted","Data":"11a96962ca7995f87e8cc7aa515820cb8d68c7b9dbdcdc52b0e713739a7dcc07"} Jan 29 12:27:56 crc kubenswrapper[4852]: I0129 12:27:56.741183 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 12:27:56 crc kubenswrapper[4852]: I0129 12:27:56.778843 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.604273615 podStartE2EDuration="11.778820302s" podCreationTimestamp="2026-01-29 12:27:45 +0000 UTC" firstStartedPulling="2026-01-29 12:27:46.317335332 +0000 UTC m=+6363.534666466" lastFinishedPulling="2026-01-29 12:27:55.491882019 +0000 UTC m=+6372.709213153" observedRunningTime="2026-01-29 12:27:56.761828358 +0000 UTC m=+6373.979159502" watchObservedRunningTime="2026-01-29 12:27:56.778820302 +0000 UTC m=+6373.996151436" Jan 29 12:27:59 crc kubenswrapper[4852]: I0129 12:27:59.356178 4852 scope.go:117] "RemoveContainer" containerID="a7273c844f72ac3a949308876ed1d0d5d08f22a75058aaf529b0edab72bd4b02" Jan 29 12:27:59 crc kubenswrapper[4852]: I0129 12:27:59.864179 4852 scope.go:117] "RemoveContainer" containerID="0a2ef7543c6eb2a01a8730d03f63274bdeefb77c9e467721b799ef795e712c67" Jan 29 12:27:59 crc kubenswrapper[4852]: I0129 12:27:59.955713 4852 scope.go:117] "RemoveContainer" containerID="fc4a151e6dd42b4ee8f1e5187b02d2f1b1c992aefafb1059b1bc2a9a52c33c70" Jan 29 12:28:02 crc kubenswrapper[4852]: I0129 12:28:02.967442 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-m7s79"] Jan 29 12:28:02 crc kubenswrapper[4852]: I0129 12:28:02.969306 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.013333 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-m7s79"] Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.015747 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-operator-scripts\") pod \"aodh-db-create-m7s79\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.015839 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vct55\" (UniqueName: \"kubernetes.io/projected/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-kube-api-access-vct55\") pod \"aodh-db-create-m7s79\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.117709 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-operator-scripts\") pod \"aodh-db-create-m7s79\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.117849 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vct55\" (UniqueName: \"kubernetes.io/projected/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-kube-api-access-vct55\") pod \"aodh-db-create-m7s79\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.118889 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-operator-scripts\") pod \"aodh-db-create-m7s79\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.139167 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vct55\" (UniqueName: \"kubernetes.io/projected/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-kube-api-access-vct55\") pod \"aodh-db-create-m7s79\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.293929 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-65d0-account-create-update-plgxr"] Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.295747 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.297416 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.297729 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.306296 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-65d0-account-create-update-plgxr"] Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.323178 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhmj4\" (UniqueName: \"kubernetes.io/projected/a424b366-8e6d-47d8-818d-7cd5ac80081f-kube-api-access-bhmj4\") pod \"aodh-65d0-account-create-update-plgxr\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.323537 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a424b366-8e6d-47d8-818d-7cd5ac80081f-operator-scripts\") pod \"aodh-65d0-account-create-update-plgxr\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.425419 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhmj4\" (UniqueName: \"kubernetes.io/projected/a424b366-8e6d-47d8-818d-7cd5ac80081f-kube-api-access-bhmj4\") pod \"aodh-65d0-account-create-update-plgxr\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.425771 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a424b366-8e6d-47d8-818d-7cd5ac80081f-operator-scripts\") pod \"aodh-65d0-account-create-update-plgxr\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.427026 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a424b366-8e6d-47d8-818d-7cd5ac80081f-operator-scripts\") pod \"aodh-65d0-account-create-update-plgxr\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.442352 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhmj4\" (UniqueName: \"kubernetes.io/projected/a424b366-8e6d-47d8-818d-7cd5ac80081f-kube-api-access-bhmj4\") pod \"aodh-65d0-account-create-update-plgxr\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.708662 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:03 crc kubenswrapper[4852]: I0129 12:28:03.983282 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-m7s79"] Jan 29 12:28:03 crc kubenswrapper[4852]: W0129 12:28:03.988486 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5a6620e_7c4b_40e5_b818_b44c5d5c123d.slice/crio-6b4d9f99eeb9f65a5ad867ebe418e88f2918ff4b40f68ffc02776063546850ca WatchSource:0}: Error finding container 6b4d9f99eeb9f65a5ad867ebe418e88f2918ff4b40f68ffc02776063546850ca: Status 404 returned error can't find the container with id 6b4d9f99eeb9f65a5ad867ebe418e88f2918ff4b40f68ffc02776063546850ca Jan 29 12:28:04 crc kubenswrapper[4852]: W0129 12:28:04.226785 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda424b366_8e6d_47d8_818d_7cd5ac80081f.slice/crio-c96d1cc65f412206c02c31a4c8bade32a9380a09c9f59ed1479a780c89d12d15 WatchSource:0}: Error finding container c96d1cc65f412206c02c31a4c8bade32a9380a09c9f59ed1479a780c89d12d15: Status 404 returned error can't find the container with id c96d1cc65f412206c02c31a4c8bade32a9380a09c9f59ed1479a780c89d12d15 Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.236032 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-65d0-account-create-update-plgxr"] Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.847603 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-65d0-account-create-update-plgxr" event={"ID":"a424b366-8e6d-47d8-818d-7cd5ac80081f","Type":"ContainerStarted","Data":"762141e5200b19617f328f1e6ca0b09983fa76bf45951f60619eb089758684fe"} Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.847910 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-65d0-account-create-update-plgxr" event={"ID":"a424b366-8e6d-47d8-818d-7cd5ac80081f","Type":"ContainerStarted","Data":"c96d1cc65f412206c02c31a4c8bade32a9380a09c9f59ed1479a780c89d12d15"} Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.849105 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-m7s79" event={"ID":"b5a6620e-7c4b-40e5-b818-b44c5d5c123d","Type":"ContainerStarted","Data":"03d17b6866fc3463a74a364a92a8caba0d4b39328eee17c56b4d0454b7fff98e"} Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.849145 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-m7s79" event={"ID":"b5a6620e-7c4b-40e5-b818-b44c5d5c123d","Type":"ContainerStarted","Data":"6b4d9f99eeb9f65a5ad867ebe418e88f2918ff4b40f68ffc02776063546850ca"} Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.868646 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-65d0-account-create-update-plgxr" podStartSLOduration=1.8685548459999999 podStartE2EDuration="1.868554846s" podCreationTimestamp="2026-01-29 12:28:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:28:04.860163181 +0000 UTC m=+6382.077494315" watchObservedRunningTime="2026-01-29 12:28:04.868554846 +0000 UTC m=+6382.085885980" Jan 29 12:28:04 crc kubenswrapper[4852]: I0129 12:28:04.875105 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-m7s79" podStartSLOduration=2.875088145 
podStartE2EDuration="2.875088145s" podCreationTimestamp="2026-01-29 12:28:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:28:04.871713393 +0000 UTC m=+6382.089044547" watchObservedRunningTime="2026-01-29 12:28:04.875088145 +0000 UTC m=+6382.092419279" Jan 29 12:28:05 crc kubenswrapper[4852]: I0129 12:28:05.863622 4852 generic.go:334] "Generic (PLEG): container finished" podID="b5a6620e-7c4b-40e5-b818-b44c5d5c123d" containerID="03d17b6866fc3463a74a364a92a8caba0d4b39328eee17c56b4d0454b7fff98e" exitCode=0 Jan 29 12:28:05 crc kubenswrapper[4852]: I0129 12:28:05.863686 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-m7s79" event={"ID":"b5a6620e-7c4b-40e5-b818-b44c5d5c123d","Type":"ContainerDied","Data":"03d17b6866fc3463a74a364a92a8caba0d4b39328eee17c56b4d0454b7fff98e"} Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.376742 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.422559 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vct55\" (UniqueName: \"kubernetes.io/projected/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-kube-api-access-vct55\") pod \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.423074 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-operator-scripts\") pod \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\" (UID: \"b5a6620e-7c4b-40e5-b818-b44c5d5c123d\") " Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.424963 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b5a6620e-7c4b-40e5-b818-b44c5d5c123d" (UID: "b5a6620e-7c4b-40e5-b818-b44c5d5c123d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.430771 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-kube-api-access-vct55" (OuterVolumeSpecName: "kube-api-access-vct55") pod "b5a6620e-7c4b-40e5-b818-b44c5d5c123d" (UID: "b5a6620e-7c4b-40e5-b818-b44c5d5c123d"). InnerVolumeSpecName "kube-api-access-vct55". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.525478 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vct55\" (UniqueName: \"kubernetes.io/projected/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-kube-api-access-vct55\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.525790 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5a6620e-7c4b-40e5-b818-b44c5d5c123d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.883345 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-m7s79" event={"ID":"b5a6620e-7c4b-40e5-b818-b44c5d5c123d","Type":"ContainerDied","Data":"6b4d9f99eeb9f65a5ad867ebe418e88f2918ff4b40f68ffc02776063546850ca"} Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.883397 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b4d9f99eeb9f65a5ad867ebe418e88f2918ff4b40f68ffc02776063546850ca" Jan 29 12:28:07 crc kubenswrapper[4852]: I0129 12:28:07.883642 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-m7s79" Jan 29 12:28:08 crc kubenswrapper[4852]: I0129 12:28:08.896191 4852 generic.go:334] "Generic (PLEG): container finished" podID="a424b366-8e6d-47d8-818d-7cd5ac80081f" containerID="762141e5200b19617f328f1e6ca0b09983fa76bf45951f60619eb089758684fe" exitCode=0 Jan 29 12:28:08 crc kubenswrapper[4852]: I0129 12:28:08.896321 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-65d0-account-create-update-plgxr" event={"ID":"a424b366-8e6d-47d8-818d-7cd5ac80081f","Type":"ContainerDied","Data":"762141e5200b19617f328f1e6ca0b09983fa76bf45951f60619eb089758684fe"} Jan 29 12:28:09 crc kubenswrapper[4852]: I0129 12:28:09.463862 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:28:09 crc kubenswrapper[4852]: E0129 12:28:09.464162 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.348783 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.495976 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a424b366-8e6d-47d8-818d-7cd5ac80081f-operator-scripts\") pod \"a424b366-8e6d-47d8-818d-7cd5ac80081f\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.496725 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhmj4\" (UniqueName: \"kubernetes.io/projected/a424b366-8e6d-47d8-818d-7cd5ac80081f-kube-api-access-bhmj4\") pod \"a424b366-8e6d-47d8-818d-7cd5ac80081f\" (UID: \"a424b366-8e6d-47d8-818d-7cd5ac80081f\") " Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.496923 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a424b366-8e6d-47d8-818d-7cd5ac80081f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a424b366-8e6d-47d8-818d-7cd5ac80081f" (UID: "a424b366-8e6d-47d8-818d-7cd5ac80081f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.497692 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a424b366-8e6d-47d8-818d-7cd5ac80081f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.502332 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a424b366-8e6d-47d8-818d-7cd5ac80081f-kube-api-access-bhmj4" (OuterVolumeSpecName: "kube-api-access-bhmj4") pod "a424b366-8e6d-47d8-818d-7cd5ac80081f" (UID: "a424b366-8e6d-47d8-818d-7cd5ac80081f"). InnerVolumeSpecName "kube-api-access-bhmj4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.600335 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhmj4\" (UniqueName: \"kubernetes.io/projected/a424b366-8e6d-47d8-818d-7cd5ac80081f-kube-api-access-bhmj4\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.735801 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pqbgw"] Jan 29 12:28:10 crc kubenswrapper[4852]: E0129 12:28:10.736233 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5a6620e-7c4b-40e5-b818-b44c5d5c123d" containerName="mariadb-database-create" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.736249 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5a6620e-7c4b-40e5-b818-b44c5d5c123d" containerName="mariadb-database-create" Jan 29 12:28:10 crc kubenswrapper[4852]: E0129 12:28:10.736264 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a424b366-8e6d-47d8-818d-7cd5ac80081f" containerName="mariadb-account-create-update" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.736270 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a424b366-8e6d-47d8-818d-7cd5ac80081f" containerName="mariadb-account-create-update" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.736451 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5a6620e-7c4b-40e5-b818-b44c5d5c123d" containerName="mariadb-database-create" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.736468 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a424b366-8e6d-47d8-818d-7cd5ac80081f" containerName="mariadb-account-create-update" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.737963 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.766423 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pqbgw"] Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.906865 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkv2h\" (UniqueName: \"kubernetes.io/projected/b31a53b5-c783-4f5a-97c8-5b203e89a936-kube-api-access-qkv2h\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.907031 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-catalog-content\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.907253 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-utilities\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.916248 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-65d0-account-create-update-plgxr" event={"ID":"a424b366-8e6d-47d8-818d-7cd5ac80081f","Type":"ContainerDied","Data":"c96d1cc65f412206c02c31a4c8bade32a9380a09c9f59ed1479a780c89d12d15"} Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.916283 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-65d0-account-create-update-plgxr" Jan 29 12:28:10 crc kubenswrapper[4852]: I0129 12:28:10.916298 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c96d1cc65f412206c02c31a4c8bade32a9380a09c9f59ed1479a780c89d12d15" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.009269 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-utilities\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.009383 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkv2h\" (UniqueName: \"kubernetes.io/projected/b31a53b5-c783-4f5a-97c8-5b203e89a936-kube-api-access-qkv2h\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.009443 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-catalog-content\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.009929 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-utilities\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.009976 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-catalog-content\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.044465 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkv2h\" (UniqueName: \"kubernetes.io/projected/b31a53b5-c783-4f5a-97c8-5b203e89a936-kube-api-access-qkv2h\") pod \"certified-operators-pqbgw\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.189081 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.758399 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pqbgw"] Jan 29 12:28:11 crc kubenswrapper[4852]: W0129 12:28:11.764146 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb31a53b5_c783_4f5a_97c8_5b203e89a936.slice/crio-b38b5001dcca821a116246f1efe9ba243539bc2b75afbaf26a954d6a7c9504c9 WatchSource:0}: Error finding container b38b5001dcca821a116246f1efe9ba243539bc2b75afbaf26a954d6a7c9504c9: Status 404 returned error can't find the container with id b38b5001dcca821a116246f1efe9ba243539bc2b75afbaf26a954d6a7c9504c9 Jan 29 12:28:11 crc kubenswrapper[4852]: I0129 12:28:11.964106 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerStarted","Data":"b38b5001dcca821a116246f1efe9ba243539bc2b75afbaf26a954d6a7c9504c9"} Jan 29 12:28:12 crc kubenswrapper[4852]: I0129 12:28:12.975821 4852 generic.go:334] "Generic (PLEG): container finished" podID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerID="d7ef20052ab2719cbcc431918773c63f73c85ffd66e150483f123876990c3a9f" exitCode=0 Jan 29 12:28:12 crc kubenswrapper[4852]: I0129 12:28:12.976137 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerDied","Data":"d7ef20052ab2719cbcc431918773c63f73c85ffd66e150483f123876990c3a9f"} Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.640583 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-mc5d6"] Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.642428 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.646554 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.647986 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.648430 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.648725 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-tbz7d" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.651236 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-mc5d6"] Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.689434 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-scripts\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.689486 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb9kn\" (UniqueName: \"kubernetes.io/projected/f3350cdb-cb7e-412b-8638-9ea88093fc95-kube-api-access-hb9kn\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.689575 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-config-data\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.689611 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-combined-ca-bundle\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.791536 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-config-data\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.793211 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-combined-ca-bundle\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.793724 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-scripts\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 
12:28:13.793920 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb9kn\" (UniqueName: \"kubernetes.io/projected/f3350cdb-cb7e-412b-8638-9ea88093fc95-kube-api-access-hb9kn\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.801397 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-config-data\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.801863 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-combined-ca-bundle\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.808105 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-scripts\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:13 crc kubenswrapper[4852]: I0129 12:28:13.815127 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb9kn\" (UniqueName: \"kubernetes.io/projected/f3350cdb-cb7e-412b-8638-9ea88093fc95-kube-api-access-hb9kn\") pod \"aodh-db-sync-mc5d6\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:14 crc kubenswrapper[4852]: I0129 12:28:14.015653 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:14 crc kubenswrapper[4852]: I0129 12:28:14.750922 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-mc5d6"] Jan 29 12:28:15 crc kubenswrapper[4852]: I0129 12:28:15.013387 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerStarted","Data":"5d8efe3f46c6b3126d9a6dec0137d9c312425b4d851be66d4f9b4a212708d3c3"} Jan 29 12:28:15 crc kubenswrapper[4852]: I0129 12:28:15.016178 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mc5d6" event={"ID":"f3350cdb-cb7e-412b-8638-9ea88093fc95","Type":"ContainerStarted","Data":"6fc6da969ac9bc2421e35ecacc687a2eab95970a2d829d97ee922ab12ef42019"} Jan 29 12:28:15 crc kubenswrapper[4852]: I0129 12:28:15.721563 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 12:28:22 crc kubenswrapper[4852]: I0129 12:28:22.463633 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:28:22 crc kubenswrapper[4852]: E0129 12:28:22.464613 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:28:26 crc kubenswrapper[4852]: I0129 12:28:26.127111 4852 generic.go:334] "Generic (PLEG): container finished" podID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerID="5d8efe3f46c6b3126d9a6dec0137d9c312425b4d851be66d4f9b4a212708d3c3" exitCode=0 Jan 29 12:28:26 crc kubenswrapper[4852]: I0129 12:28:26.127211 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerDied","Data":"5d8efe3f46c6b3126d9a6dec0137d9c312425b4d851be66d4f9b4a212708d3c3"} Jan 29 12:28:33 crc kubenswrapper[4852]: I0129 12:28:33.205382 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mc5d6" event={"ID":"f3350cdb-cb7e-412b-8638-9ea88093fc95","Type":"ContainerStarted","Data":"8ca0135e77f98709c3eea226fdf1f287a43f8a18664cead78192f6272a51508b"} Jan 29 12:28:33 crc kubenswrapper[4852]: I0129 12:28:33.211076 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerStarted","Data":"2aec25983105ca513900712e35340a7d41392fdd0df94c64b7424242ffb6bfd9"} Jan 29 12:28:33 crc kubenswrapper[4852]: I0129 12:28:33.229136 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-mc5d6" podStartSLOduration=2.87045955 podStartE2EDuration="20.229115156s" podCreationTimestamp="2026-01-29 12:28:13 +0000 UTC" firstStartedPulling="2026-01-29 12:28:14.995179683 +0000 UTC m=+6392.212510817" lastFinishedPulling="2026-01-29 12:28:32.353835249 +0000 UTC m=+6409.571166423" observedRunningTime="2026-01-29 12:28:33.227262511 +0000 UTC m=+6410.444593655" watchObservedRunningTime="2026-01-29 12:28:33.229115156 +0000 UTC m=+6410.446446300" Jan 29 12:28:33 crc kubenswrapper[4852]: I0129 
12:28:33.266518 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pqbgw" podStartSLOduration=3.891459003 podStartE2EDuration="23.266495466s" podCreationTimestamp="2026-01-29 12:28:10 +0000 UTC" firstStartedPulling="2026-01-29 12:28:12.978621352 +0000 UTC m=+6390.195952496" lastFinishedPulling="2026-01-29 12:28:32.353657815 +0000 UTC m=+6409.570988959" observedRunningTime="2026-01-29 12:28:33.262902329 +0000 UTC m=+6410.480233503" watchObservedRunningTime="2026-01-29 12:28:33.266495466 +0000 UTC m=+6410.483826600" Jan 29 12:28:37 crc kubenswrapper[4852]: I0129 12:28:37.265655 4852 generic.go:334] "Generic (PLEG): container finished" podID="f3350cdb-cb7e-412b-8638-9ea88093fc95" containerID="8ca0135e77f98709c3eea226fdf1f287a43f8a18664cead78192f6272a51508b" exitCode=0 Jan 29 12:28:37 crc kubenswrapper[4852]: I0129 12:28:37.265720 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mc5d6" event={"ID":"f3350cdb-cb7e-412b-8638-9ea88093fc95","Type":"ContainerDied","Data":"8ca0135e77f98709c3eea226fdf1f287a43f8a18664cead78192f6272a51508b"} Jan 29 12:28:37 crc kubenswrapper[4852]: I0129 12:28:37.463412 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:28:37 crc kubenswrapper[4852]: E0129 12:28:37.464190 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.643172 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.783530 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-scripts\") pod \"f3350cdb-cb7e-412b-8638-9ea88093fc95\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.783658 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-config-data\") pod \"f3350cdb-cb7e-412b-8638-9ea88093fc95\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.783768 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-combined-ca-bundle\") pod \"f3350cdb-cb7e-412b-8638-9ea88093fc95\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.783843 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hb9kn\" (UniqueName: \"kubernetes.io/projected/f3350cdb-cb7e-412b-8638-9ea88093fc95-kube-api-access-hb9kn\") pod \"f3350cdb-cb7e-412b-8638-9ea88093fc95\" (UID: \"f3350cdb-cb7e-412b-8638-9ea88093fc95\") " Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.790118 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3350cdb-cb7e-412b-8638-9ea88093fc95-kube-api-access-hb9kn" (OuterVolumeSpecName: "kube-api-access-hb9kn") pod "f3350cdb-cb7e-412b-8638-9ea88093fc95" (UID: "f3350cdb-cb7e-412b-8638-9ea88093fc95"). InnerVolumeSpecName "kube-api-access-hb9kn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.790388 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-scripts" (OuterVolumeSpecName: "scripts") pod "f3350cdb-cb7e-412b-8638-9ea88093fc95" (UID: "f3350cdb-cb7e-412b-8638-9ea88093fc95"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.815936 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-config-data" (OuterVolumeSpecName: "config-data") pod "f3350cdb-cb7e-412b-8638-9ea88093fc95" (UID: "f3350cdb-cb7e-412b-8638-9ea88093fc95"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.816910 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3350cdb-cb7e-412b-8638-9ea88093fc95" (UID: "f3350cdb-cb7e-412b-8638-9ea88093fc95"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.886885 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.887039 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.887126 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3350cdb-cb7e-412b-8638-9ea88093fc95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:38 crc kubenswrapper[4852]: I0129 12:28:38.887206 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hb9kn\" (UniqueName: \"kubernetes.io/projected/f3350cdb-cb7e-412b-8638-9ea88093fc95-kube-api-access-hb9kn\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:39 crc kubenswrapper[4852]: I0129 12:28:39.285152 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-mc5d6" event={"ID":"f3350cdb-cb7e-412b-8638-9ea88093fc95","Type":"ContainerDied","Data":"6fc6da969ac9bc2421e35ecacc687a2eab95970a2d829d97ee922ab12ef42019"} Jan 29 12:28:39 crc kubenswrapper[4852]: I0129 12:28:39.285446 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fc6da969ac9bc2421e35ecacc687a2eab95970a2d829d97ee922ab12ef42019" Jan 29 12:28:39 crc kubenswrapper[4852]: I0129 12:28:39.285296 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-mc5d6" Jan 29 12:28:41 crc kubenswrapper[4852]: I0129 12:28:41.190496 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:41 crc kubenswrapper[4852]: I0129 12:28:41.190833 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:41 crc kubenswrapper[4852]: I0129 12:28:41.242077 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:41 crc kubenswrapper[4852]: I0129 12:28:41.364568 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:41 crc kubenswrapper[4852]: I0129 12:28:41.942839 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pqbgw"] Jan 29 12:28:43 crc kubenswrapper[4852]: I0129 12:28:43.328806 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pqbgw" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="registry-server" containerID="cri-o://2aec25983105ca513900712e35340a7d41392fdd0df94c64b7424242ffb6bfd9" gracePeriod=2 Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.010549 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 29 12:28:44 crc kubenswrapper[4852]: E0129 12:28:44.012089 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3350cdb-cb7e-412b-8638-9ea88093fc95" containerName="aodh-db-sync" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.012239 4852 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="f3350cdb-cb7e-412b-8638-9ea88093fc95" containerName="aodh-db-sync" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.012613 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3350cdb-cb7e-412b-8638-9ea88093fc95" containerName="aodh-db-sync" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.017262 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.020940 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.021442 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-tbz7d" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.021706 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.027773 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.116146 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.116200 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-scripts\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.116236 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-config-data\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.116359 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx86c\" (UniqueName: \"kubernetes.io/projected/e385c621-ad37-4274-90fd-0d71c5e7ddd3-kube-api-access-fx86c\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.220217 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx86c\" (UniqueName: \"kubernetes.io/projected/e385c621-ad37-4274-90fd-0d71c5e7ddd3-kube-api-access-fx86c\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.220291 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.220315 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-scripts\") pod \"aodh-0\" (UID: 
\"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.220345 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-config-data\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.226938 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-scripts\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.227798 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-config-data\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.233431 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e385c621-ad37-4274-90fd-0d71c5e7ddd3-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.252282 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx86c\" (UniqueName: \"kubernetes.io/projected/e385c621-ad37-4274-90fd-0d71c5e7ddd3-kube-api-access-fx86c\") pod \"aodh-0\" (UID: \"e385c621-ad37-4274-90fd-0d71c5e7ddd3\") " pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.342752 4852 generic.go:334] "Generic (PLEG): container finished" podID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerID="2aec25983105ca513900712e35340a7d41392fdd0df94c64b7424242ffb6bfd9" exitCode=0 Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.342800 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerDied","Data":"2aec25983105ca513900712e35340a7d41392fdd0df94c64b7424242ffb6bfd9"} Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.356016 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.452341 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.630208 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkv2h\" (UniqueName: \"kubernetes.io/projected/b31a53b5-c783-4f5a-97c8-5b203e89a936-kube-api-access-qkv2h\") pod \"b31a53b5-c783-4f5a-97c8-5b203e89a936\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.630669 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-catalog-content\") pod \"b31a53b5-c783-4f5a-97c8-5b203e89a936\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.630725 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-utilities\") pod \"b31a53b5-c783-4f5a-97c8-5b203e89a936\" (UID: \"b31a53b5-c783-4f5a-97c8-5b203e89a936\") " Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.631734 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-utilities" (OuterVolumeSpecName: "utilities") pod "b31a53b5-c783-4f5a-97c8-5b203e89a936" (UID: "b31a53b5-c783-4f5a-97c8-5b203e89a936"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.635890 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b31a53b5-c783-4f5a-97c8-5b203e89a936-kube-api-access-qkv2h" (OuterVolumeSpecName: "kube-api-access-qkv2h") pod "b31a53b5-c783-4f5a-97c8-5b203e89a936" (UID: "b31a53b5-c783-4f5a-97c8-5b203e89a936"). InnerVolumeSpecName "kube-api-access-qkv2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.686619 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b31a53b5-c783-4f5a-97c8-5b203e89a936" (UID: "b31a53b5-c783-4f5a-97c8-5b203e89a936"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.732928 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkv2h\" (UniqueName: \"kubernetes.io/projected/b31a53b5-c783-4f5a-97c8-5b203e89a936-kube-api-access-qkv2h\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.733198 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.733265 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b31a53b5-c783-4f5a-97c8-5b203e89a936-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:44 crc kubenswrapper[4852]: W0129 12:28:44.847683 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode385c621_ad37_4274_90fd_0d71c5e7ddd3.slice/crio-089857403f1333595fc5e4219f602abbc9906b0a2d0aefb58abd10b665a726b5 WatchSource:0}: Error finding container 089857403f1333595fc5e4219f602abbc9906b0a2d0aefb58abd10b665a726b5: Status 404 returned error can't find the container with id 089857403f1333595fc5e4219f602abbc9906b0a2d0aefb58abd10b665a726b5 Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.848414 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 29 12:28:44 crc kubenswrapper[4852]: I0129 12:28:44.851436 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.357442 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e385c621-ad37-4274-90fd-0d71c5e7ddd3","Type":"ContainerStarted","Data":"089857403f1333595fc5e4219f602abbc9906b0a2d0aefb58abd10b665a726b5"} Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.361479 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pqbgw" event={"ID":"b31a53b5-c783-4f5a-97c8-5b203e89a936","Type":"ContainerDied","Data":"b38b5001dcca821a116246f1efe9ba243539bc2b75afbaf26a954d6a7c9504c9"} Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.361553 4852 scope.go:117] "RemoveContainer" containerID="2aec25983105ca513900712e35340a7d41392fdd0df94c64b7424242ffb6bfd9" Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.361568 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pqbgw" Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.396969 4852 scope.go:117] "RemoveContainer" containerID="5d8efe3f46c6b3126d9a6dec0137d9c312425b4d851be66d4f9b4a212708d3c3" Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.402957 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pqbgw"] Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.409610 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pqbgw"] Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.428621 4852 scope.go:117] "RemoveContainer" containerID="d7ef20052ab2719cbcc431918773c63f73c85ffd66e150483f123876990c3a9f" Jan 29 12:28:45 crc kubenswrapper[4852]: I0129 12:28:45.484294 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" path="/var/lib/kubelet/pods/b31a53b5-c783-4f5a-97c8-5b203e89a936/volumes" Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.074512 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.075255 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-central-agent" containerID="cri-o://1308a8fd57b60dc1d2f2803527086e087e44c6783c97ee24a9c1e67d3b68ed56" gracePeriod=30 Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.075403 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="proxy-httpd" containerID="cri-o://11a96962ca7995f87e8cc7aa515820cb8d68c7b9dbdcdc52b0e713739a7dcc07" gracePeriod=30 Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.075475 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="sg-core" containerID="cri-o://01f921df2578275927d80bec6132a65055bb1a55f67db373754a9206b7b18a9f" gracePeriod=30 Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.075518 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-notification-agent" containerID="cri-o://4db658a9d5c00870a89924fd37a8139963c2d0a043beea10f133391be09b9a33" gracePeriod=30 Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.373650 4852 generic.go:334] "Generic (PLEG): container finished" podID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerID="01f921df2578275927d80bec6132a65055bb1a55f67db373754a9206b7b18a9f" exitCode=2 Jan 29 12:28:46 crc kubenswrapper[4852]: I0129 12:28:46.374050 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerDied","Data":"01f921df2578275927d80bec6132a65055bb1a55f67db373754a9206b7b18a9f"} Jan 29 12:28:47 crc kubenswrapper[4852]: I0129 12:28:47.390632 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e385c621-ad37-4274-90fd-0d71c5e7ddd3","Type":"ContainerStarted","Data":"aac4ffbb5a944bd557eb9f015c231ccdb1f595750a054ad7f601c0642c60afe8"} Jan 29 12:28:47 crc kubenswrapper[4852]: I0129 12:28:47.395439 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerID="11a96962ca7995f87e8cc7aa515820cb8d68c7b9dbdcdc52b0e713739a7dcc07" exitCode=0 Jan 29 12:28:47 crc kubenswrapper[4852]: I0129 12:28:47.395603 4852 generic.go:334] "Generic (PLEG): container finished" podID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerID="1308a8fd57b60dc1d2f2803527086e087e44c6783c97ee24a9c1e67d3b68ed56" exitCode=0 Jan 29 12:28:47 crc kubenswrapper[4852]: I0129 12:28:47.395760 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerDied","Data":"11a96962ca7995f87e8cc7aa515820cb8d68c7b9dbdcdc52b0e713739a7dcc07"} Jan 29 12:28:47 crc kubenswrapper[4852]: I0129 12:28:47.395914 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerDied","Data":"1308a8fd57b60dc1d2f2803527086e087e44c6783c97ee24a9c1e67d3b68ed56"} Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.439549 4852 generic.go:334] "Generic (PLEG): container finished" podID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerID="4db658a9d5c00870a89924fd37a8139963c2d0a043beea10f133391be09b9a33" exitCode=0 Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.439629 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerDied","Data":"4db658a9d5c00870a89924fd37a8139963c2d0a043beea10f133391be09b9a33"} Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.442782 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e385c621-ad37-4274-90fd-0d71c5e7ddd3","Type":"ContainerStarted","Data":"fe0e84dc308be0168ed0d97c750ac919496f1292749421079af310507183a48d"} Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.754181 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.917370 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7shz\" (UniqueName: \"kubernetes.io/projected/d0f1b92d-6285-478d-98c1-278aa2426e5f-kube-api-access-t7shz\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.917856 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-scripts\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.917903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-log-httpd\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.917984 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-run-httpd\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.918011 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-config-data\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.918043 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-sg-core-conf-yaml\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.918112 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-combined-ca-bundle\") pod \"d0f1b92d-6285-478d-98c1-278aa2426e5f\" (UID: \"d0f1b92d-6285-478d-98c1-278aa2426e5f\") " Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.920110 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.930315 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.939903 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-scripts" (OuterVolumeSpecName: "scripts") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:51 crc kubenswrapper[4852]: I0129 12:28:51.947763 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0f1b92d-6285-478d-98c1-278aa2426e5f-kube-api-access-t7shz" (OuterVolumeSpecName: "kube-api-access-t7shz") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "kube-api-access-t7shz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.020597 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7shz\" (UniqueName: \"kubernetes.io/projected/d0f1b92d-6285-478d-98c1-278aa2426e5f-kube-api-access-t7shz\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.020632 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.020642 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.020651 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1b92d-6285-478d-98c1-278aa2426e5f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.043312 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.123051 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.141819 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-config-data" (OuterVolumeSpecName: "config-data") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.141965 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0f1b92d-6285-478d-98c1-278aa2426e5f" (UID: "d0f1b92d-6285-478d-98c1-278aa2426e5f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.241128 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.241226 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1b92d-6285-478d-98c1-278aa2426e5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.458642 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1b92d-6285-478d-98c1-278aa2426e5f","Type":"ContainerDied","Data":"94369b58116b48b6a33a3b92b0d33280a93efa8fe377abb35abd7fe9be23f349"} Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.458727 4852 scope.go:117] "RemoveContainer" containerID="11a96962ca7995f87e8cc7aa515820cb8d68c7b9dbdcdc52b0e713739a7dcc07" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.459023 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.464192 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.464567 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.507144 4852 scope.go:117] "RemoveContainer" containerID="01f921df2578275927d80bec6132a65055bb1a55f67db373754a9206b7b18a9f" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.516388 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.530888 4852 scope.go:117] "RemoveContainer" containerID="4db658a9d5c00870a89924fd37a8139963c2d0a043beea10f133391be09b9a33" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.537685 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.575465 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.575905 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-notification-agent" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.575919 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-notification-agent" Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.575940 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="registry-server" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.575946 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="registry-server" Jan 29 12:28:52 crc 
kubenswrapper[4852]: E0129 12:28:52.576147 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="sg-core" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576153 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="sg-core" Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.576175 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="extract-utilities" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576181 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="extract-utilities" Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.576195 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="extract-content" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576200 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="extract-content" Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.576214 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="proxy-httpd" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576220 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="proxy-httpd" Jan 29 12:28:52 crc kubenswrapper[4852]: E0129 12:28:52.576232 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-central-agent" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576237 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-central-agent" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576431 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-notification-agent" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576440 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b31a53b5-c783-4f5a-97c8-5b203e89a936" containerName="registry-server" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576449 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="proxy-httpd" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576467 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="ceilometer-central-agent" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.576479 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" containerName="sg-core" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.578287 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.586235 4852 scope.go:117] "RemoveContainer" containerID="1308a8fd57b60dc1d2f2803527086e087e44c6783c97ee24a9c1e67d3b68ed56" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.586574 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.586699 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.604075 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.751837 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-config-data\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.752189 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-run-httpd\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.752446 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7jzx\" (UniqueName: \"kubernetes.io/projected/9d16d709-3a50-453e-b4df-b220cb8bb547-kube-api-access-x7jzx\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.752515 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.752879 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-scripts\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.752937 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.752960 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-log-httpd\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.854785 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-run-httpd\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.855202 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-run-httpd\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.855366 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7jzx\" (UniqueName: \"kubernetes.io/projected/9d16d709-3a50-453e-b4df-b220cb8bb547-kube-api-access-x7jzx\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.855396 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.855453 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-scripts\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.856171 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.856206 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-log-httpd\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.856259 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-config-data\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.856638 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-log-httpd\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.860831 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.862250 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-scripts\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.865025 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.876245 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-config-data\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.889368 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7jzx\" (UniqueName: \"kubernetes.io/projected/9d16d709-3a50-453e-b4df-b220cb8bb547-kube-api-access-x7jzx\") pod \"ceilometer-0\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " pod="openstack/ceilometer-0" Jan 29 12:28:52 crc kubenswrapper[4852]: I0129 12:28:52.906547 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:28:53 crc kubenswrapper[4852]: I0129 12:28:53.488997 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0f1b92d-6285-478d-98c1-278aa2426e5f" path="/var/lib/kubelet/pods/d0f1b92d-6285-478d-98c1-278aa2426e5f/volumes" Jan 29 12:28:54 crc kubenswrapper[4852]: I0129 12:28:54.494463 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:28:55 crc kubenswrapper[4852]: I0129 12:28:55.497356 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e385c621-ad37-4274-90fd-0d71c5e7ddd3","Type":"ContainerStarted","Data":"6e1e4e62e479d06dc533f59d8e8dc27be74961b463b1013bb57a052fe95f442a"} Jan 29 12:28:55 crc kubenswrapper[4852]: I0129 12:28:55.498861 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerStarted","Data":"126fab660da23c1346bb4e951d31df87ef76ad7b0a1752d3abba0a98b670ca84"} Jan 29 12:28:56 crc kubenswrapper[4852]: I0129 12:28:56.512848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerStarted","Data":"fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5"} Jan 29 12:28:58 crc kubenswrapper[4852]: I0129 12:28:58.537535 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerStarted","Data":"c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7"} Jan 29 12:29:03 crc kubenswrapper[4852]: I0129 12:29:03.599636 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerStarted","Data":"509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05"} Jan 29 12:29:03 crc kubenswrapper[4852]: I0129 12:29:03.602342 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"e385c621-ad37-4274-90fd-0d71c5e7ddd3","Type":"ContainerStarted","Data":"a2617c0327f6286196fed2726b4189ed7d56a9e24985446292ac58068aa61386"} Jan 29 12:29:03 crc kubenswrapper[4852]: I0129 12:29:03.642282 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.7277150580000002 podStartE2EDuration="20.642257797s" podCreationTimestamp="2026-01-29 12:28:43 +0000 UTC" firstStartedPulling="2026-01-29 12:28:44.851228202 +0000 UTC m=+6422.068559336" lastFinishedPulling="2026-01-29 12:29:02.765770921 +0000 UTC m=+6439.983102075" observedRunningTime="2026-01-29 12:29:03.62883832 +0000 UTC m=+6440.846169464" watchObservedRunningTime="2026-01-29 12:29:03.642257797 +0000 UTC m=+6440.859588941" Jan 29 12:29:06 crc kubenswrapper[4852]: I0129 12:29:06.462952 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:29:06 crc kubenswrapper[4852]: E0129 12:29:06.463796 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:29:06 crc kubenswrapper[4852]: I0129 12:29:06.639949 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerStarted","Data":"65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d"} Jan 29 12:29:06 crc kubenswrapper[4852]: I0129 12:29:06.641868 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 12:29:06 crc kubenswrapper[4852]: I0129 12:29:06.679387 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.894039513 podStartE2EDuration="14.679369613s" podCreationTimestamp="2026-01-29 12:28:52 +0000 UTC" firstStartedPulling="2026-01-29 12:28:54.500209452 +0000 UTC m=+6431.717540576" lastFinishedPulling="2026-01-29 12:29:06.285539552 +0000 UTC m=+6443.502870676" observedRunningTime="2026-01-29 12:29:06.676883082 +0000 UTC m=+6443.894214246" watchObservedRunningTime="2026-01-29 12:29:06.679369613 +0000 UTC m=+6443.896700737" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.459375 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-pdxw5"] Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.462681 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.485254 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-pdxw5"] Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.563921 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-1c50-account-create-update-l8bdj"] Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.566999 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.569799 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.574718 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-1c50-account-create-update-l8bdj"] Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.615409 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsbsn\" (UniqueName: \"kubernetes.io/projected/a15935b8-7558-4137-aef2-10a574579a5c-kube-api-access-dsbsn\") pod \"manila-db-create-pdxw5\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.615854 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a15935b8-7558-4137-aef2-10a574579a5c-operator-scripts\") pod \"manila-db-create-pdxw5\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.718072 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkhss\" (UniqueName: \"kubernetes.io/projected/8d743e65-13db-4231-8871-a60e305a8974-kube-api-access-qkhss\") pod \"manila-1c50-account-create-update-l8bdj\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.718140 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsbsn\" (UniqueName: \"kubernetes.io/projected/a15935b8-7558-4137-aef2-10a574579a5c-kube-api-access-dsbsn\") pod \"manila-db-create-pdxw5\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.718260 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a15935b8-7558-4137-aef2-10a574579a5c-operator-scripts\") pod \"manila-db-create-pdxw5\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.718419 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d743e65-13db-4231-8871-a60e305a8974-operator-scripts\") pod \"manila-1c50-account-create-update-l8bdj\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.719089 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a15935b8-7558-4137-aef2-10a574579a5c-operator-scripts\") pod \"manila-db-create-pdxw5\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.735634 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsbsn\" (UniqueName: \"kubernetes.io/projected/a15935b8-7558-4137-aef2-10a574579a5c-kube-api-access-dsbsn\") pod \"manila-db-create-pdxw5\" (UID: 
\"a15935b8-7558-4137-aef2-10a574579a5c\") " pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.790085 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.822967 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkhss\" (UniqueName: \"kubernetes.io/projected/8d743e65-13db-4231-8871-a60e305a8974-kube-api-access-qkhss\") pod \"manila-1c50-account-create-update-l8bdj\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.823394 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d743e65-13db-4231-8871-a60e305a8974-operator-scripts\") pod \"manila-1c50-account-create-update-l8bdj\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.824118 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d743e65-13db-4231-8871-a60e305a8974-operator-scripts\") pod \"manila-1c50-account-create-update-l8bdj\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.843122 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkhss\" (UniqueName: \"kubernetes.io/projected/8d743e65-13db-4231-8871-a60e305a8974-kube-api-access-qkhss\") pod \"manila-1c50-account-create-update-l8bdj\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:11 crc kubenswrapper[4852]: I0129 12:29:11.887853 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:12 crc kubenswrapper[4852]: I0129 12:29:12.420831 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-1c50-account-create-update-l8bdj"] Jan 29 12:29:12 crc kubenswrapper[4852]: I0129 12:29:12.434847 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-pdxw5"] Jan 29 12:29:12 crc kubenswrapper[4852]: I0129 12:29:12.704759 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-pdxw5" event={"ID":"a15935b8-7558-4137-aef2-10a574579a5c","Type":"ContainerStarted","Data":"b4a61d60caa13ff54703b9488bc583cd3191b0efab54fe5aff2fef779c81359f"} Jan 29 12:29:12 crc kubenswrapper[4852]: I0129 12:29:12.706052 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1c50-account-create-update-l8bdj" event={"ID":"8d743e65-13db-4231-8871-a60e305a8974","Type":"ContainerStarted","Data":"7cd5a1ec2314d94050585e069169f73fb953109619384f4b3bbd77091f36e36f"} Jan 29 12:29:13 crc kubenswrapper[4852]: I0129 12:29:13.716708 4852 generic.go:334] "Generic (PLEG): container finished" podID="a15935b8-7558-4137-aef2-10a574579a5c" containerID="4dc555e9bf650bd00339160c0bb4569bd21ee3dcb1a874dfd4236948b1056fbc" exitCode=0 Jan 29 12:29:13 crc kubenswrapper[4852]: I0129 12:29:13.716757 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-pdxw5" event={"ID":"a15935b8-7558-4137-aef2-10a574579a5c","Type":"ContainerDied","Data":"4dc555e9bf650bd00339160c0bb4569bd21ee3dcb1a874dfd4236948b1056fbc"} Jan 29 12:29:13 crc kubenswrapper[4852]: I0129 12:29:13.720197 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1c50-account-create-update-l8bdj" event={"ID":"8d743e65-13db-4231-8871-a60e305a8974","Type":"ContainerStarted","Data":"ed7dd709b05b8904d8d7e42e124ff917b4ae2d1aad24c259ac25264e516460e1"} Jan 29 12:29:13 crc kubenswrapper[4852]: I0129 12:29:13.755982 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-1c50-account-create-update-l8bdj" podStartSLOduration=2.755961158 podStartE2EDuration="2.755961158s" podCreationTimestamp="2026-01-29 12:29:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:13.746170549 +0000 UTC m=+6450.963501683" watchObservedRunningTime="2026-01-29 12:29:13.755961158 +0000 UTC m=+6450.973292302" Jan 29 12:29:14 crc kubenswrapper[4852]: I0129 12:29:14.754717 4852 generic.go:334] "Generic (PLEG): container finished" podID="8d743e65-13db-4231-8871-a60e305a8974" containerID="ed7dd709b05b8904d8d7e42e124ff917b4ae2d1aad24c259ac25264e516460e1" exitCode=0 Jan 29 12:29:14 crc kubenswrapper[4852]: I0129 12:29:14.754880 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1c50-account-create-update-l8bdj" event={"ID":"8d743e65-13db-4231-8871-a60e305a8974","Type":"ContainerDied","Data":"ed7dd709b05b8904d8d7e42e124ff917b4ae2d1aad24c259ac25264e516460e1"} Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.224468 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.395711 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsbsn\" (UniqueName: \"kubernetes.io/projected/a15935b8-7558-4137-aef2-10a574579a5c-kube-api-access-dsbsn\") pod \"a15935b8-7558-4137-aef2-10a574579a5c\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.395903 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a15935b8-7558-4137-aef2-10a574579a5c-operator-scripts\") pod \"a15935b8-7558-4137-aef2-10a574579a5c\" (UID: \"a15935b8-7558-4137-aef2-10a574579a5c\") " Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.396627 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a15935b8-7558-4137-aef2-10a574579a5c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a15935b8-7558-4137-aef2-10a574579a5c" (UID: "a15935b8-7558-4137-aef2-10a574579a5c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.396894 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a15935b8-7558-4137-aef2-10a574579a5c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.402343 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a15935b8-7558-4137-aef2-10a574579a5c-kube-api-access-dsbsn" (OuterVolumeSpecName: "kube-api-access-dsbsn") pod "a15935b8-7558-4137-aef2-10a574579a5c" (UID: "a15935b8-7558-4137-aef2-10a574579a5c"). InnerVolumeSpecName "kube-api-access-dsbsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.498887 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsbsn\" (UniqueName: \"kubernetes.io/projected/a15935b8-7558-4137-aef2-10a574579a5c-kube-api-access-dsbsn\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.767119 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-pdxw5" event={"ID":"a15935b8-7558-4137-aef2-10a574579a5c","Type":"ContainerDied","Data":"b4a61d60caa13ff54703b9488bc583cd3191b0efab54fe5aff2fef779c81359f"} Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.767164 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4a61d60caa13ff54703b9488bc583cd3191b0efab54fe5aff2fef779c81359f" Jan 29 12:29:15 crc kubenswrapper[4852]: I0129 12:29:15.767143 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-pdxw5" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.156386 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.314659 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkhss\" (UniqueName: \"kubernetes.io/projected/8d743e65-13db-4231-8871-a60e305a8974-kube-api-access-qkhss\") pod \"8d743e65-13db-4231-8871-a60e305a8974\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.314930 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d743e65-13db-4231-8871-a60e305a8974-operator-scripts\") pod \"8d743e65-13db-4231-8871-a60e305a8974\" (UID: \"8d743e65-13db-4231-8871-a60e305a8974\") " Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.315478 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d743e65-13db-4231-8871-a60e305a8974-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8d743e65-13db-4231-8871-a60e305a8974" (UID: "8d743e65-13db-4231-8871-a60e305a8974"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.316019 4852 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8d743e65-13db-4231-8871-a60e305a8974-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.319931 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d743e65-13db-4231-8871-a60e305a8974-kube-api-access-qkhss" (OuterVolumeSpecName: "kube-api-access-qkhss") pod "8d743e65-13db-4231-8871-a60e305a8974" (UID: "8d743e65-13db-4231-8871-a60e305a8974"). InnerVolumeSpecName "kube-api-access-qkhss". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.417406 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkhss\" (UniqueName: \"kubernetes.io/projected/8d743e65-13db-4231-8871-a60e305a8974-kube-api-access-qkhss\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.784869 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1c50-account-create-update-l8bdj" event={"ID":"8d743e65-13db-4231-8871-a60e305a8974","Type":"ContainerDied","Data":"7cd5a1ec2314d94050585e069169f73fb953109619384f4b3bbd77091f36e36f"} Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.784925 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cd5a1ec2314d94050585e069169f73fb953109619384f4b3bbd77091f36e36f" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.785025 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-1c50-account-create-update-l8bdj" Jan 29 12:29:16 crc kubenswrapper[4852]: E0129 12:29:16.894964 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d743e65_13db_4231_8871_a60e305a8974.slice/crio-7cd5a1ec2314d94050585e069169f73fb953109619384f4b3bbd77091f36e36f\": RecentStats: unable to find data in memory cache]" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.919396 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-pvkg8"] Jan 29 12:29:16 crc kubenswrapper[4852]: E0129 12:29:16.919948 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a15935b8-7558-4137-aef2-10a574579a5c" containerName="mariadb-database-create" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.920338 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="a15935b8-7558-4137-aef2-10a574579a5c" containerName="mariadb-database-create" Jan 29 12:29:16 crc kubenswrapper[4852]: E0129 12:29:16.920359 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d743e65-13db-4231-8871-a60e305a8974" containerName="mariadb-account-create-update" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.920365 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d743e65-13db-4231-8871-a60e305a8974" containerName="mariadb-account-create-update" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.920624 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="a15935b8-7558-4137-aef2-10a574579a5c" containerName="mariadb-database-create" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.920640 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d743e65-13db-4231-8871-a60e305a8974" containerName="mariadb-account-create-update" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.921344 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.923748 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-nzp9q" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.924488 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Jan 29 12:29:16 crc kubenswrapper[4852]: I0129 12:29:16.946634 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-pvkg8"] Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.034387 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-combined-ca-bundle\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.034467 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxt5v\" (UniqueName: \"kubernetes.io/projected/fec3d121-a2ad-4784-8722-5a62104ad91f-kube-api-access-rxt5v\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.034626 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-config-data\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.034746 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-job-config-data\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.137539 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-job-config-data\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.137714 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-combined-ca-bundle\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.137731 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxt5v\" (UniqueName: \"kubernetes.io/projected/fec3d121-a2ad-4784-8722-5a62104ad91f-kube-api-access-rxt5v\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.137791 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-config-data\") pod \"manila-db-sync-pvkg8\" (UID: 
\"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.144961 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-combined-ca-bundle\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.147224 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-job-config-data\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.151677 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-config-data\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.166227 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxt5v\" (UniqueName: \"kubernetes.io/projected/fec3d121-a2ad-4784-8722-5a62104ad91f-kube-api-access-rxt5v\") pod \"manila-db-sync-pvkg8\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.252657 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:17 crc kubenswrapper[4852]: I0129 12:29:17.468209 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:29:17 crc kubenswrapper[4852]: E0129 12:29:17.468777 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:29:18 crc kubenswrapper[4852]: I0129 12:29:18.099051 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-pvkg8"] Jan 29 12:29:18 crc kubenswrapper[4852]: W0129 12:29:18.102149 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfec3d121_a2ad_4784_8722_5a62104ad91f.slice/crio-a1127bdfdd02e3fe05b049208119fa5b4a68c8eefb627e9baada5c9e5b1755ea WatchSource:0}: Error finding container a1127bdfdd02e3fe05b049208119fa5b4a68c8eefb627e9baada5c9e5b1755ea: Status 404 returned error can't find the container with id a1127bdfdd02e3fe05b049208119fa5b4a68c8eefb627e9baada5c9e5b1755ea Jan 29 12:29:18 crc kubenswrapper[4852]: I0129 12:29:18.806738 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-pvkg8" event={"ID":"fec3d121-a2ad-4784-8722-5a62104ad91f","Type":"ContainerStarted","Data":"a1127bdfdd02e3fe05b049208119fa5b4a68c8eefb627e9baada5c9e5b1755ea"} Jan 29 12:29:22 crc kubenswrapper[4852]: I0129 12:29:22.920696 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ceilometer-0" Jan 29 12:29:23 crc kubenswrapper[4852]: I0129 12:29:23.870009 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-pvkg8" event={"ID":"fec3d121-a2ad-4784-8722-5a62104ad91f","Type":"ContainerStarted","Data":"9cdbfe80720acdbbbd0398878f6b133f4d371fefda295500af2f0ee1c2e8840f"} Jan 29 12:29:23 crc kubenswrapper[4852]: I0129 12:29:23.895425 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-pvkg8" podStartSLOduration=3.520524178 podStartE2EDuration="7.895409544s" podCreationTimestamp="2026-01-29 12:29:16 +0000 UTC" firstStartedPulling="2026-01-29 12:29:18.10482984 +0000 UTC m=+6455.322160984" lastFinishedPulling="2026-01-29 12:29:22.479715216 +0000 UTC m=+6459.697046350" observedRunningTime="2026-01-29 12:29:23.887315217 +0000 UTC m=+6461.104646361" watchObservedRunningTime="2026-01-29 12:29:23.895409544 +0000 UTC m=+6461.112740678" Jan 29 12:29:26 crc kubenswrapper[4852]: I0129 12:29:26.900803 4852 generic.go:334] "Generic (PLEG): container finished" podID="fec3d121-a2ad-4784-8722-5a62104ad91f" containerID="9cdbfe80720acdbbbd0398878f6b133f4d371fefda295500af2f0ee1c2e8840f" exitCode=0 Jan 29 12:29:26 crc kubenswrapper[4852]: I0129 12:29:26.900922 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-pvkg8" event={"ID":"fec3d121-a2ad-4784-8722-5a62104ad91f","Type":"ContainerDied","Data":"9cdbfe80720acdbbbd0398878f6b133f4d371fefda295500af2f0ee1c2e8840f"} Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.514662 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.598931 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-combined-ca-bundle\") pod \"fec3d121-a2ad-4784-8722-5a62104ad91f\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.599039 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-job-config-data\") pod \"fec3d121-a2ad-4784-8722-5a62104ad91f\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.599132 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-config-data\") pod \"fec3d121-a2ad-4784-8722-5a62104ad91f\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.599359 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxt5v\" (UniqueName: \"kubernetes.io/projected/fec3d121-a2ad-4784-8722-5a62104ad91f-kube-api-access-rxt5v\") pod \"fec3d121-a2ad-4784-8722-5a62104ad91f\" (UID: \"fec3d121-a2ad-4784-8722-5a62104ad91f\") " Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.604754 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "fec3d121-a2ad-4784-8722-5a62104ad91f" (UID: "fec3d121-a2ad-4784-8722-5a62104ad91f"). InnerVolumeSpecName "job-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.606251 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fec3d121-a2ad-4784-8722-5a62104ad91f-kube-api-access-rxt5v" (OuterVolumeSpecName: "kube-api-access-rxt5v") pod "fec3d121-a2ad-4784-8722-5a62104ad91f" (UID: "fec3d121-a2ad-4784-8722-5a62104ad91f"). InnerVolumeSpecName "kube-api-access-rxt5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.608831 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-config-data" (OuterVolumeSpecName: "config-data") pod "fec3d121-a2ad-4784-8722-5a62104ad91f" (UID: "fec3d121-a2ad-4784-8722-5a62104ad91f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.635634 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fec3d121-a2ad-4784-8722-5a62104ad91f" (UID: "fec3d121-a2ad-4784-8722-5a62104ad91f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.703293 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxt5v\" (UniqueName: \"kubernetes.io/projected/fec3d121-a2ad-4784-8722-5a62104ad91f-kube-api-access-rxt5v\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.703344 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.703365 4852 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-job-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.703382 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec3d121-a2ad-4784-8722-5a62104ad91f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.926428 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-pvkg8" event={"ID":"fec3d121-a2ad-4784-8722-5a62104ad91f","Type":"ContainerDied","Data":"a1127bdfdd02e3fe05b049208119fa5b4a68c8eefb627e9baada5c9e5b1755ea"} Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.926481 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1127bdfdd02e3fe05b049208119fa5b4a68c8eefb627e9baada5c9e5b1755ea" Jan 29 12:29:28 crc kubenswrapper[4852]: I0129 12:29:28.926565 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-pvkg8" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.231787 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Jan 29 12:29:29 crc kubenswrapper[4852]: E0129 12:29:29.232539 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec3d121-a2ad-4784-8722-5a62104ad91f" containerName="manila-db-sync" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.232562 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec3d121-a2ad-4784-8722-5a62104ad91f" containerName="manila-db-sync" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.232909 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="fec3d121-a2ad-4784-8722-5a62104ad91f" containerName="manila-db-sync" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.234361 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.237814 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.238426 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.238805 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.269766 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-nzp9q" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.314330 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.317266 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.317300 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.317536 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q7pq\" (UniqueName: \"kubernetes.io/projected/0ec170b2-e22f-40a0-a407-e6b873103fc1-kube-api-access-2q7pq\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.317632 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-config-data\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.318476 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.318643 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-scripts\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.318699 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0ec170b2-e22f-40a0-a407-e6b873103fc1-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.333048 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.341803 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.348608 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.414638 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cf6ff4b45-dkm5w"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.416682 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.421654 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.421735 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.421763 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-scripts\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.421796 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-config-data\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.421939 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422014 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422121 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-ceph\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422184 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q7pq\" (UniqueName: \"kubernetes.io/projected/0ec170b2-e22f-40a0-a407-e6b873103fc1-kube-api-access-2q7pq\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422225 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-config-data\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc 
kubenswrapper[4852]: I0129 12:29:29.422254 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdhpz\" (UniqueName: \"kubernetes.io/projected/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-kube-api-access-pdhpz\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422292 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422309 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422353 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-scripts\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422394 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0ec170b2-e22f-40a0-a407-e6b873103fc1-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.422523 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0ec170b2-e22f-40a0-a407-e6b873103fc1-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.430952 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.432754 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cf6ff4b45-dkm5w"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.436734 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-config-data\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.441142 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-scripts\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.443247 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ec170b2-e22f-40a0-a407-e6b873103fc1-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.456244 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q7pq\" (UniqueName: \"kubernetes.io/projected/0ec170b2-e22f-40a0-a407-e6b873103fc1-kube-api-access-2q7pq\") pod \"manila-scheduler-0\" (UID: \"0ec170b2-e22f-40a0-a407-e6b873103fc1\") " pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531656 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-config\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531709 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-ceph\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531766 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdhpz\" (UniqueName: \"kubernetes.io/projected/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-kube-api-access-pdhpz\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531789 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531810 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lvx9\" (UniqueName: \"kubernetes.io/projected/fa36c18f-3bc8-414d-9768-c3d23b84ae03-kube-api-access-2lvx9\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531866 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531894 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-nb\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.531970 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532002 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-scripts\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532020 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-dns-svc\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532059 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-config-data\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532079 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-sb\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532128 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532562 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.532812 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.539388 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.541711 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.541718 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-ceph\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.549854 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.551931 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-scripts\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.552423 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.569782 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-config-data\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.570382 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdhpz\" (UniqueName: \"kubernetes.io/projected/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-kube-api-access-pdhpz\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.570865 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.571102 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8e48974-dfb6-4275-9c9e-0ff74a9f06d1-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1\") " pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.604203 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634187 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lvx9\" (UniqueName: \"kubernetes.io/projected/fa36c18f-3bc8-414d-9768-c3d23b84ae03-kube-api-access-2lvx9\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634427 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-config-data\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634489 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-nb\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634557 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-dns-svc\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634594 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc7d9b76-36d6-435f-9999-e9e468154bda-etc-machine-id\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634613 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634651 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzl6w\" (UniqueName: \"kubernetes.io/projected/cc7d9b76-36d6-435f-9999-e9e468154bda-kube-api-access-kzl6w\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634677 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-sb\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634708 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-config-data-custom\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: 
I0129 12:29:29.634755 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-scripts\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634773 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc7d9b76-36d6-435f-9999-e9e468154bda-logs\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.634799 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-config\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.637236 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-nb\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.638482 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-config\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.638779 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-sb\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.638860 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-dns-svc\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.657313 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lvx9\" (UniqueName: \"kubernetes.io/projected/fa36c18f-3bc8-414d-9768-c3d23b84ae03-kube-api-access-2lvx9\") pod \"dnsmasq-dns-6cf6ff4b45-dkm5w\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.673667 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.720395 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.736917 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-config-data\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737104 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc7d9b76-36d6-435f-9999-e9e468154bda-etc-machine-id\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737124 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737176 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzl6w\" (UniqueName: \"kubernetes.io/projected/cc7d9b76-36d6-435f-9999-e9e468154bda-kube-api-access-kzl6w\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737214 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-config-data-custom\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737278 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-scripts\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737275 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cc7d9b76-36d6-435f-9999-e9e468154bda-etc-machine-id\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737295 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc7d9b76-36d6-435f-9999-e9e468154bda-logs\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.737856 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc7d9b76-36d6-435f-9999-e9e468154bda-logs\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.767521 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-config-data-custom\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " 
pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.767690 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.767801 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-scripts\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.768058 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc7d9b76-36d6-435f-9999-e9e468154bda-config-data\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:29 crc kubenswrapper[4852]: I0129 12:29:29.771211 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzl6w\" (UniqueName: \"kubernetes.io/projected/cc7d9b76-36d6-435f-9999-e9e468154bda-kube-api-access-kzl6w\") pod \"manila-api-0\" (UID: \"cc7d9b76-36d6-435f-9999-e9e468154bda\") " pod="openstack/manila-api-0" Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.035299 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.198798 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Jan 29 12:29:30 crc kubenswrapper[4852]: W0129 12:29:30.430268 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8e48974_dfb6_4275_9c9e_0ff74a9f06d1.slice/crio-9d5f9b0d79c25adec1cf465890cd9ec063fbdef707a4f364e5b666e19bb7acad WatchSource:0}: Error finding container 9d5f9b0d79c25adec1cf465890cd9ec063fbdef707a4f364e5b666e19bb7acad: Status 404 returned error can't find the container with id 9d5f9b0d79c25adec1cf465890cd9ec063fbdef707a4f364e5b666e19bb7acad Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.431364 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.512516 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cf6ff4b45-dkm5w"] Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.718024 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Jan 29 12:29:30 crc kubenswrapper[4852]: W0129 12:29:30.724739 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc7d9b76_36d6_435f_9999_e9e468154bda.slice/crio-2358fac0aeec7c92256f862c9899e6c0abaed931c3342f0248c0a84855b7613a WatchSource:0}: Error finding container 2358fac0aeec7c92256f862c9899e6c0abaed931c3342f0248c0a84855b7613a: Status 404 returned error can't find the container with id 2358fac0aeec7c92256f862c9899e6c0abaed931c3342f0248c0a84855b7613a Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.951885 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" 
event={"ID":"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1","Type":"ContainerStarted","Data":"9d5f9b0d79c25adec1cf465890cd9ec063fbdef707a4f364e5b666e19bb7acad"} Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.955488 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"cc7d9b76-36d6-435f-9999-e9e468154bda","Type":"ContainerStarted","Data":"2358fac0aeec7c92256f862c9899e6c0abaed931c3342f0248c0a84855b7613a"} Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.959905 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"0ec170b2-e22f-40a0-a407-e6b873103fc1","Type":"ContainerStarted","Data":"adf5a073fe2aa6ade6a5c82b713cdbf766c62691018133b81535caff69a4125b"} Jan 29 12:29:30 crc kubenswrapper[4852]: I0129 12:29:30.961387 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" event={"ID":"fa36c18f-3bc8-414d-9768-c3d23b84ae03","Type":"ContainerStarted","Data":"2a6660c41f723e28eb6bbc3c3699b203e6e2febd9dc80688d20363ccf526ed40"} Jan 29 12:29:31 crc kubenswrapper[4852]: I0129 12:29:31.471566 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:29:31 crc kubenswrapper[4852]: E0129 12:29:31.472060 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:29:31 crc kubenswrapper[4852]: I0129 12:29:31.977725 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"cc7d9b76-36d6-435f-9999-e9e468154bda","Type":"ContainerStarted","Data":"9c943d0e784117bebd0548d1dd78b85547f2410aca2159cf1b357cb68800c451"} Jan 29 12:29:31 crc kubenswrapper[4852]: I0129 12:29:31.987312 4852 generic.go:334] "Generic (PLEG): container finished" podID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerID="5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e" exitCode=0 Jan 29 12:29:31 crc kubenswrapper[4852]: I0129 12:29:31.987387 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" event={"ID":"fa36c18f-3bc8-414d-9768-c3d23b84ae03","Type":"ContainerDied","Data":"5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e"} Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:32.999491 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"cc7d9b76-36d6-435f-9999-e9e468154bda","Type":"ContainerStarted","Data":"83a1ac363fb3fbfb36ed6845c66a19add2377dde5318b5aa141306e13da45f2d"} Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.000504 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.002690 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"0ec170b2-e22f-40a0-a407-e6b873103fc1","Type":"ContainerStarted","Data":"8cf98d35674a54cc1b7bc096ea53942493488da62e8263e4b75130d8cc87537d"} Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.002734 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" 
event={"ID":"0ec170b2-e22f-40a0-a407-e6b873103fc1","Type":"ContainerStarted","Data":"2f624698706d793ffefcc80a5ce697f9a6499764ff8f85fb02185aac30d18dc4"} Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.006057 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" event={"ID":"fa36c18f-3bc8-414d-9768-c3d23b84ae03","Type":"ContainerStarted","Data":"0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c"} Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.006241 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.028199 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.028182492 podStartE2EDuration="4.028182492s" podCreationTimestamp="2026-01-29 12:29:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:33.017435321 +0000 UTC m=+6470.234766455" watchObservedRunningTime="2026-01-29 12:29:33.028182492 +0000 UTC m=+6470.245513626" Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.045788 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=2.924541275 podStartE2EDuration="4.045768411s" podCreationTimestamp="2026-01-29 12:29:29 +0000 UTC" firstStartedPulling="2026-01-29 12:29:30.225756643 +0000 UTC m=+6467.443087767" lastFinishedPulling="2026-01-29 12:29:31.346983769 +0000 UTC m=+6468.564314903" observedRunningTime="2026-01-29 12:29:33.03752411 +0000 UTC m=+6470.254855244" watchObservedRunningTime="2026-01-29 12:29:33.045768411 +0000 UTC m=+6470.263099545" Jan 29 12:29:33 crc kubenswrapper[4852]: I0129 12:29:33.062923 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" podStartSLOduration=4.062901308 podStartE2EDuration="4.062901308s" podCreationTimestamp="2026-01-29 12:29:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:33.055664892 +0000 UTC m=+6470.272996026" watchObservedRunningTime="2026-01-29 12:29:33.062901308 +0000 UTC m=+6470.280232452" Jan 29 12:29:39 crc kubenswrapper[4852]: I0129 12:29:39.605409 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Jan 29 12:29:39 crc kubenswrapper[4852]: I0129 12:29:39.724111 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:29:39 crc kubenswrapper[4852]: I0129 12:29:39.922072 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59dcdddb99-s7jdf"] Jan 29 12:29:39 crc kubenswrapper[4852]: I0129 12:29:39.922409 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="dnsmasq-dns" containerID="cri-o://441cc2fda8cb89c9963bf264ac097d76b32f3386322c4c9005e5e73a6ee67ba4" gracePeriod=10 Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.050847 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 
10.217.1.79:5353: connect: connection refused" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.085255 4852 generic.go:334] "Generic (PLEG): container finished" podID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerID="441cc2fda8cb89c9963bf264ac097d76b32f3386322c4c9005e5e73a6ee67ba4" exitCode=0 Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.085297 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" event={"ID":"4100243b-aafb-4ca0-8822-ecd70ec56f55","Type":"ContainerDied","Data":"441cc2fda8cb89c9963bf264ac097d76b32f3386322c4c9005e5e73a6ee67ba4"} Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.509737 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.633845 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-sb\") pod \"4100243b-aafb-4ca0-8822-ecd70ec56f55\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.634827 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-config\") pod \"4100243b-aafb-4ca0-8822-ecd70ec56f55\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.634892 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-dns-svc\") pod \"4100243b-aafb-4ca0-8822-ecd70ec56f55\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.634942 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g42h2\" (UniqueName: \"kubernetes.io/projected/4100243b-aafb-4ca0-8822-ecd70ec56f55-kube-api-access-g42h2\") pod \"4100243b-aafb-4ca0-8822-ecd70ec56f55\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.635142 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-nb\") pod \"4100243b-aafb-4ca0-8822-ecd70ec56f55\" (UID: \"4100243b-aafb-4ca0-8822-ecd70ec56f55\") " Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.640744 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4100243b-aafb-4ca0-8822-ecd70ec56f55-kube-api-access-g42h2" (OuterVolumeSpecName: "kube-api-access-g42h2") pod "4100243b-aafb-4ca0-8822-ecd70ec56f55" (UID: "4100243b-aafb-4ca0-8822-ecd70ec56f55"). InnerVolumeSpecName "kube-api-access-g42h2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.695714 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4100243b-aafb-4ca0-8822-ecd70ec56f55" (UID: "4100243b-aafb-4ca0-8822-ecd70ec56f55"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.703314 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4100243b-aafb-4ca0-8822-ecd70ec56f55" (UID: "4100243b-aafb-4ca0-8822-ecd70ec56f55"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.704140 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4100243b-aafb-4ca0-8822-ecd70ec56f55" (UID: "4100243b-aafb-4ca0-8822-ecd70ec56f55"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.712080 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-config" (OuterVolumeSpecName: "config") pod "4100243b-aafb-4ca0-8822-ecd70ec56f55" (UID: "4100243b-aafb-4ca0-8822-ecd70ec56f55"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.737984 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.738014 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.738025 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.738034 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4100243b-aafb-4ca0-8822-ecd70ec56f55-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:40 crc kubenswrapper[4852]: I0129 12:29:40.738044 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g42h2\" (UniqueName: \"kubernetes.io/projected/4100243b-aafb-4ca0-8822-ecd70ec56f55-kube-api-access-g42h2\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.108765 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.108744 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59dcdddb99-s7jdf" event={"ID":"4100243b-aafb-4ca0-8822-ecd70ec56f55","Type":"ContainerDied","Data":"9cbd3fb49f0a5bbad463ffc62398fa5f51bf00c4e620ab816297046a8817a78a"} Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.112704 4852 scope.go:117] "RemoveContainer" containerID="441cc2fda8cb89c9963bf264ac097d76b32f3386322c4c9005e5e73a6ee67ba4" Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.127435 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1","Type":"ContainerStarted","Data":"2081086ba93ba91832c3e75ed6d0581e19b2cb48d34f28b936c190545f105bed"} Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.157444 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59dcdddb99-s7jdf"] Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.171996 4852 scope.go:117] "RemoveContainer" containerID="d3382521d5c606c5dbaa1469e8343a67f404c4992b50e3eb09c4804bcc863c60" Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.172417 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59dcdddb99-s7jdf"] Jan 29 12:29:41 crc kubenswrapper[4852]: I0129 12:29:41.478455 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" path="/var/lib/kubelet/pods/4100243b-aafb-4ca0-8822-ecd70ec56f55/volumes" Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.142084 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"a8e48974-dfb6-4275-9c9e-0ff74a9f06d1","Type":"ContainerStarted","Data":"a470f0802825de76090a1b5bab2990bc02eb17e4acecb447c473dd6bd01ed9f5"} Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.179335 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.53536238 podStartE2EDuration="13.179310038s" podCreationTimestamp="2026-01-29 12:29:29 +0000 UTC" firstStartedPulling="2026-01-29 12:29:30.431823751 +0000 UTC m=+6467.649154885" lastFinishedPulling="2026-01-29 12:29:40.075771409 +0000 UTC m=+6477.293102543" observedRunningTime="2026-01-29 12:29:42.166495596 +0000 UTC m=+6479.383826740" watchObservedRunningTime="2026-01-29 12:29:42.179310038 +0000 UTC m=+6479.396641172" Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.304610 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.304900 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-central-agent" containerID="cri-o://fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5" gracePeriod=30 Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.304916 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="proxy-httpd" containerID="cri-o://65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d" gracePeriod=30 Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.304984 4852 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-notification-agent" containerID="cri-o://c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7" gracePeriod=30 Jan 29 12:29:42 crc kubenswrapper[4852]: I0129 12:29:42.305001 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="sg-core" containerID="cri-o://509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05" gracePeriod=30 Jan 29 12:29:43 crc kubenswrapper[4852]: I0129 12:29:43.163710 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerID="65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d" exitCode=0 Jan 29 12:29:43 crc kubenswrapper[4852]: I0129 12:29:43.164037 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerID="509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05" exitCode=2 Jan 29 12:29:43 crc kubenswrapper[4852]: I0129 12:29:43.163778 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerDied","Data":"65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d"} Jan 29 12:29:43 crc kubenswrapper[4852]: I0129 12:29:43.164082 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerDied","Data":"509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05"} Jan 29 12:29:44 crc kubenswrapper[4852]: I0129 12:29:44.177817 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerID="fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5" exitCode=0 Jan 29 12:29:44 crc kubenswrapper[4852]: I0129 12:29:44.177896 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerDied","Data":"fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5"} Jan 29 12:29:46 crc kubenswrapper[4852]: I0129 12:29:46.463144 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:29:46 crc kubenswrapper[4852]: E0129 12:29:46.463899 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.111284 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195191 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-log-httpd\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195240 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-combined-ca-bundle\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195352 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-config-data\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195422 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7jzx\" (UniqueName: \"kubernetes.io/projected/9d16d709-3a50-453e-b4df-b220cb8bb547-kube-api-access-x7jzx\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195486 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-sg-core-conf-yaml\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195536 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-run-httpd\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.195636 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-scripts\") pod \"9d16d709-3a50-453e-b4df-b220cb8bb547\" (UID: \"9d16d709-3a50-453e-b4df-b220cb8bb547\") " Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.197154 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.197349 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.202761 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-scripts" (OuterVolumeSpecName: "scripts") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.204760 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d16d709-3a50-453e-b4df-b220cb8bb547-kube-api-access-x7jzx" (OuterVolumeSpecName: "kube-api-access-x7jzx") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "kube-api-access-x7jzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.226388 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.243296 4852 generic.go:334] "Generic (PLEG): container finished" podID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerID="c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7" exitCode=0 Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.243343 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerDied","Data":"c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7"} Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.243414 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.243461 4852 scope.go:117] "RemoveContainer" containerID="65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.243425 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d16d709-3a50-453e-b4df-b220cb8bb547","Type":"ContainerDied","Data":"126fab660da23c1346bb4e951d31df87ef76ad7b0a1752d3abba0a98b670ca84"} Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.289297 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.307129 4852 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.307172 4852 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.307184 4852 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d16d709-3a50-453e-b4df-b220cb8bb547-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.307194 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.307207 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7jzx\" (UniqueName: \"kubernetes.io/projected/9d16d709-3a50-453e-b4df-b220cb8bb547-kube-api-access-x7jzx\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.307218 4852 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.322815 4852 scope.go:117] "RemoveContainer" containerID="509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.337174 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-config-data" (OuterVolumeSpecName: "config-data") pod "9d16d709-3a50-453e-b4df-b220cb8bb547" (UID: "9d16d709-3a50-453e-b4df-b220cb8bb547"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.345707 4852 scope.go:117] "RemoveContainer" containerID="c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.367569 4852 scope.go:117] "RemoveContainer" containerID="fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.392349 4852 scope.go:117] "RemoveContainer" containerID="65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.392868 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d\": container with ID starting with 65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d not found: ID does not exist" containerID="65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.392913 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d"} err="failed to get container status \"65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d\": rpc error: code = NotFound desc = could not find container \"65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d\": container with ID starting with 65c0971373f9d431a3281a6bc084062243ec990d33aec69a71df42e32448591d not found: ID does not exist" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.392942 4852 scope.go:117] "RemoveContainer" containerID="509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.393221 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05\": container with ID starting with 509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05 not found: ID does not exist" containerID="509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.393257 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05"} err="failed to get container status \"509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05\": rpc error: code = NotFound desc = could not find container \"509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05\": container with ID starting with 509c4d05811369114b28a7876b8ba095e7e80d19c9cbf370d9e2a5026acc8f05 not found: ID does not exist" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.393278 4852 scope.go:117] "RemoveContainer" containerID="c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.393509 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7\": container with ID starting with c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7 not found: ID does not exist" containerID="c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7" Jan 29 12:29:48 crc 
kubenswrapper[4852]: I0129 12:29:48.393540 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7"} err="failed to get container status \"c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7\": rpc error: code = NotFound desc = could not find container \"c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7\": container with ID starting with c630ad2b8933674e692f74005ca7daf0aee276bbee9abf70252ff7d732555fc7 not found: ID does not exist" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.393557 4852 scope.go:117] "RemoveContainer" containerID="fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.393807 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5\": container with ID starting with fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5 not found: ID does not exist" containerID="fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.393838 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5"} err="failed to get container status \"fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5\": rpc error: code = NotFound desc = could not find container \"fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5\": container with ID starting with fb8ae93b0f4bcc0c027bd05e0468704a9cf0a4b76039bdcc8045ac1b174231f5 not found: ID does not exist" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.408750 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d16d709-3a50-453e-b4df-b220cb8bb547-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.579507 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.591328 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.611470 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.611933 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-central-agent" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.611957 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-central-agent" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.611977 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="proxy-httpd" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.611983 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="proxy-httpd" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.612003 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-notification-agent" Jan 29 12:29:48 crc 
kubenswrapper[4852]: I0129 12:29:48.612010 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-notification-agent" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.612020 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="sg-core" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612025 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="sg-core" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.612040 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="dnsmasq-dns" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612046 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="dnsmasq-dns" Jan 29 12:29:48 crc kubenswrapper[4852]: E0129 12:29:48.612063 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="init" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612070 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="init" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612284 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="proxy-httpd" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612295 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-central-agent" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612311 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="sg-core" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612324 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4100243b-aafb-4ca0-8822-ecd70ec56f55" containerName="dnsmasq-dns" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.612339 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" containerName="ceilometer-notification-agent" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.614376 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.616646 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.616976 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.635011 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714109 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9zkb\" (UniqueName: \"kubernetes.io/projected/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-kube-api-access-q9zkb\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714224 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-log-httpd\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714258 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-run-httpd\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714370 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714395 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-config-data\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714412 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.714504 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-scripts\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816430 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-scripts\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816486 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9zkb\" (UniqueName: \"kubernetes.io/projected/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-kube-api-access-q9zkb\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816551 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-log-httpd\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816579 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-run-httpd\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816667 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816685 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-config-data\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.816699 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.817407 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-log-httpd\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.817897 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-run-httpd\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.821384 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.821452 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-scripts\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.821572 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.821656 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-config-data\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.833465 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9zkb\" (UniqueName: \"kubernetes.io/projected/108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3-kube-api-access-q9zkb\") pod \"ceilometer-0\" (UID: \"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3\") " pod="openstack/ceilometer-0" Jan 29 12:29:48 crc kubenswrapper[4852]: I0129 12:29:48.938248 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 12:29:49 crc kubenswrapper[4852]: I0129 12:29:49.450197 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 12:29:49 crc kubenswrapper[4852]: W0129 12:29:49.456942 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod108f4beb_9e0e_4e7f_bf7d_7bcfe5959ce3.slice/crio-28b74c87862af36d6fae21e3e2c05814aa81a947f8cf492367b38fced891b39d WatchSource:0}: Error finding container 28b74c87862af36d6fae21e3e2c05814aa81a947f8cf492367b38fced891b39d: Status 404 returned error can't find the container with id 28b74c87862af36d6fae21e3e2c05814aa81a947f8cf492367b38fced891b39d Jan 29 12:29:49 crc kubenswrapper[4852]: I0129 12:29:49.482890 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d16d709-3a50-453e-b4df-b220cb8bb547" path="/var/lib/kubelet/pods/9d16d709-3a50-453e-b4df-b220cb8bb547/volumes" Jan 29 12:29:49 crc kubenswrapper[4852]: I0129 12:29:49.675136 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Jan 29 12:29:50 crc kubenswrapper[4852]: I0129 12:29:50.271515 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3","Type":"ContainerStarted","Data":"fefe6a55f39c5d73cc60714e41c85d860bb7040bfc9d383e14b966b386f14f50"} Jan 29 12:29:50 crc kubenswrapper[4852]: I0129 12:29:50.271887 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3","Type":"ContainerStarted","Data":"28b74c87862af36d6fae21e3e2c05814aa81a947f8cf492367b38fced891b39d"} Jan 29 12:29:51 crc kubenswrapper[4852]: I0129 12:29:51.148782 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Jan 29 12:29:51 crc kubenswrapper[4852]: I0129 12:29:51.285401 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3","Type":"ContainerStarted","Data":"dca7bd0dc015de971dd6c64c1ba52b77fc0ddd454301a7657ea4e01079b77145"} Jan 29 12:29:51 crc kubenswrapper[4852]: I0129 12:29:51.408790 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Jan 29 12:29:52 crc kubenswrapper[4852]: I0129 12:29:52.301998 4852 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3","Type":"ContainerStarted","Data":"e2d29c825548de9c46268caed41378ad9638ac4030f28fd6ff7dc2c20e269d18"} Jan 29 12:29:55 crc kubenswrapper[4852]: I0129 12:29:55.333373 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3","Type":"ContainerStarted","Data":"6de847c9b8a09bea7700feb19dfd4687760a38e65d420ab92832bd8179e66c57"} Jan 29 12:29:55 crc kubenswrapper[4852]: I0129 12:29:55.334987 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 12:29:57 crc kubenswrapper[4852]: I0129 12:29:57.464226 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:29:57 crc kubenswrapper[4852]: E0129 12:29:57.464940 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.156191 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=6.87707558 podStartE2EDuration="12.156173037s" podCreationTimestamp="2026-01-29 12:29:48 +0000 UTC" firstStartedPulling="2026-01-29 12:29:49.459228843 +0000 UTC m=+6486.676559977" lastFinishedPulling="2026-01-29 12:29:54.73832629 +0000 UTC m=+6491.955657434" observedRunningTime="2026-01-29 12:29:55.363825214 +0000 UTC m=+6492.581156348" watchObservedRunningTime="2026-01-29 12:30:00.156173037 +0000 UTC m=+6497.373504171" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.157449 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn"] Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.158767 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.161054 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.161534 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.177416 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn"] Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.295531 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-config-volume\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.296035 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-secret-volume\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.296182 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqgqt\" (UniqueName: \"kubernetes.io/projected/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-kube-api-access-dqgqt\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.397864 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-secret-volume\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.397973 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqgqt\" (UniqueName: \"kubernetes.io/projected/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-kube-api-access-dqgqt\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.398015 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-config-volume\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.398914 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-config-volume\") pod 
\"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.404684 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-secret-volume\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.415662 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqgqt\" (UniqueName: \"kubernetes.io/projected/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-kube-api-access-dqgqt\") pod \"collect-profiles-29494830-mbppn\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.481890 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:00 crc kubenswrapper[4852]: I0129 12:30:00.982216 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn"] Jan 29 12:30:01 crc kubenswrapper[4852]: I0129 12:30:01.367878 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Jan 29 12:30:01 crc kubenswrapper[4852]: I0129 12:30:01.395057 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" event={"ID":"697e0b44-2a41-46c2-88a1-5a95d72a2dd5","Type":"ContainerStarted","Data":"8347f6c52e121d04edfdcb013d68b8741eeabff4fa4a1d2773b17e42188a0b83"} Jan 29 12:30:01 crc kubenswrapper[4852]: I0129 12:30:01.395114 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" event={"ID":"697e0b44-2a41-46c2-88a1-5a95d72a2dd5","Type":"ContainerStarted","Data":"06a5a8fc2f312b8cb2f47faba8da78cac791d790be3e0540e74e5451c3aef207"} Jan 29 12:30:01 crc kubenswrapper[4852]: I0129 12:30:01.431929 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" podStartSLOduration=1.431906896 podStartE2EDuration="1.431906896s" podCreationTimestamp="2026-01-29 12:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:30:01.42879059 +0000 UTC m=+6498.646121734" watchObservedRunningTime="2026-01-29 12:30:01.431906896 +0000 UTC m=+6498.649238030" Jan 29 12:30:02 crc kubenswrapper[4852]: I0129 12:30:02.406325 4852 generic.go:334] "Generic (PLEG): container finished" podID="697e0b44-2a41-46c2-88a1-5a95d72a2dd5" containerID="8347f6c52e121d04edfdcb013d68b8741eeabff4fa4a1d2773b17e42188a0b83" exitCode=0 Jan 29 12:30:02 crc kubenswrapper[4852]: I0129 12:30:02.406414 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" event={"ID":"697e0b44-2a41-46c2-88a1-5a95d72a2dd5","Type":"ContainerDied","Data":"8347f6c52e121d04edfdcb013d68b8741eeabff4fa4a1d2773b17e42188a0b83"} Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.038532 4852 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-skb4w"] Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.050438 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-skb4w"] Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.483820 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53624bd6-a8fc-485a-a225-4ed4a82cdb06" path="/var/lib/kubelet/pods/53624bd6-a8fc-485a-a225-4ed4a82cdb06/volumes" Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.847951 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.977599 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqgqt\" (UniqueName: \"kubernetes.io/projected/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-kube-api-access-dqgqt\") pod \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.977724 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-config-volume\") pod \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.978393 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-config-volume" (OuterVolumeSpecName: "config-volume") pod "697e0b44-2a41-46c2-88a1-5a95d72a2dd5" (UID: "697e0b44-2a41-46c2-88a1-5a95d72a2dd5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.980918 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-secret-volume\") pod \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\" (UID: \"697e0b44-2a41-46c2-88a1-5a95d72a2dd5\") " Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.981501 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.983275 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "697e0b44-2a41-46c2-88a1-5a95d72a2dd5" (UID: "697e0b44-2a41-46c2-88a1-5a95d72a2dd5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:30:03 crc kubenswrapper[4852]: I0129 12:30:03.983311 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-kube-api-access-dqgqt" (OuterVolumeSpecName: "kube-api-access-dqgqt") pod "697e0b44-2a41-46c2-88a1-5a95d72a2dd5" (UID: "697e0b44-2a41-46c2-88a1-5a95d72a2dd5"). InnerVolumeSpecName "kube-api-access-dqgqt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.083436 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqgqt\" (UniqueName: \"kubernetes.io/projected/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-kube-api-access-dqgqt\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.083473 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/697e0b44-2a41-46c2-88a1-5a95d72a2dd5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.431312 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" event={"ID":"697e0b44-2a41-46c2-88a1-5a95d72a2dd5","Type":"ContainerDied","Data":"06a5a8fc2f312b8cb2f47faba8da78cac791d790be3e0540e74e5451c3aef207"} Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.431757 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06a5a8fc2f312b8cb2f47faba8da78cac791d790be3e0540e74e5451c3aef207" Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.431917 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn" Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.907112 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd"] Jan 29 12:30:04 crc kubenswrapper[4852]: I0129 12:30:04.917253 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494785-tg7cd"] Jan 29 12:30:05 crc kubenswrapper[4852]: I0129 12:30:05.033039 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-9d89-account-create-update-n8cxx"] Jan 29 12:30:05 crc kubenswrapper[4852]: I0129 12:30:05.047514 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-9d89-account-create-update-n8cxx"] Jan 29 12:30:05 crc kubenswrapper[4852]: I0129 12:30:05.481325 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d12e5a7-a0e7-4c98-bdb0-9a697ad80454" path="/var/lib/kubelet/pods/0d12e5a7-a0e7-4c98-bdb0-9a697ad80454/volumes" Jan 29 12:30:05 crc kubenswrapper[4852]: I0129 12:30:05.483678 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c41c0a46-8cd3-44af-8351-8a4008c6622f" path="/var/lib/kubelet/pods/c41c0a46-8cd3-44af-8351-8a4008c6622f/volumes" Jan 29 12:30:08 crc kubenswrapper[4852]: I0129 12:30:08.463533 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:30:08 crc kubenswrapper[4852]: E0129 12:30:08.464241 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:30:11 crc kubenswrapper[4852]: I0129 12:30:11.038559 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-sm2q7"] Jan 29 12:30:11 crc kubenswrapper[4852]: I0129 12:30:11.052117 4852 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-sm2q7"] Jan 29 12:30:11 crc kubenswrapper[4852]: I0129 12:30:11.478365 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ebf09e3-a2fe-42f2-a3cc-6634486a8f88" path="/var/lib/kubelet/pods/3ebf09e3-a2fe-42f2-a3cc-6634486a8f88/volumes" Jan 29 12:30:13 crc kubenswrapper[4852]: I0129 12:30:13.041103 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-6377-account-create-update-jp4k2"] Jan 29 12:30:13 crc kubenswrapper[4852]: I0129 12:30:13.056855 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-6377-account-create-update-jp4k2"] Jan 29 12:30:13 crc kubenswrapper[4852]: I0129 12:30:13.480500 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f946532-86bc-4386-ac2e-188452c413d5" path="/var/lib/kubelet/pods/8f946532-86bc-4386-ac2e-188452c413d5/volumes" Jan 29 12:30:18 crc kubenswrapper[4852]: I0129 12:30:18.947949 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 12:30:22 crc kubenswrapper[4852]: I0129 12:30:22.463697 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:30:22 crc kubenswrapper[4852]: E0129 12:30:22.464559 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:30:37 crc kubenswrapper[4852]: I0129 12:30:37.463526 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:30:37 crc kubenswrapper[4852]: E0129 12:30:37.464309 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.463987 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:30:51 crc kubenswrapper[4852]: E0129 12:30:51.464891 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.736868 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d8bfddc7-j6zzp"] Jan 29 12:30:51 crc kubenswrapper[4852]: E0129 12:30:51.737451 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="697e0b44-2a41-46c2-88a1-5a95d72a2dd5" containerName="collect-profiles" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.737477 4852 
state_mem.go:107] "Deleted CPUSet assignment" podUID="697e0b44-2a41-46c2-88a1-5a95d72a2dd5" containerName="collect-profiles" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.737854 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="697e0b44-2a41-46c2-88a1-5a95d72a2dd5" containerName="collect-profiles" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.739396 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.746431 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.756536 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d8bfddc7-j6zzp"] Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.882447 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvvnv\" (UniqueName: \"kubernetes.io/projected/ca6bd452-1ddb-4b21-bee9-b726a2e83807-kube-api-access-wvvnv\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.883059 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-nb\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.883151 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-dns-svc\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.883508 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-sb\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.883614 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-config\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.883749 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-openstack-cell1\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.985429 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-nb\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" 
(UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.985481 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-dns-svc\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.985543 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-sb\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.985570 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-config\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.985732 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-openstack-cell1\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.985801 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvvnv\" (UniqueName: \"kubernetes.io/projected/ca6bd452-1ddb-4b21-bee9-b726a2e83807-kube-api-access-wvvnv\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.986790 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-nb\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.987202 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-openstack-cell1\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.987320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-dns-svc\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc kubenswrapper[4852]: I0129 12:30:51.987989 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-sb\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:51 crc 
kubenswrapper[4852]: I0129 12:30:51.988123 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-config\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:52 crc kubenswrapper[4852]: I0129 12:30:52.007458 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvvnv\" (UniqueName: \"kubernetes.io/projected/ca6bd452-1ddb-4b21-bee9-b726a2e83807-kube-api-access-wvvnv\") pod \"dnsmasq-dns-7d8bfddc7-j6zzp\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:52 crc kubenswrapper[4852]: I0129 12:30:52.066809 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:52 crc kubenswrapper[4852]: I0129 12:30:52.648149 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d8bfddc7-j6zzp"] Jan 29 12:30:52 crc kubenswrapper[4852]: I0129 12:30:52.974201 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" event={"ID":"ca6bd452-1ddb-4b21-bee9-b726a2e83807","Type":"ContainerStarted","Data":"259e2870f9206d94728d03e3800b18a1d2f2f97992e151cdfd522ec5bce948f5"} Jan 29 12:30:53 crc kubenswrapper[4852]: I0129 12:30:53.988685 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerID="171771dd7c901ef81d0897158394af79e837f37781131099d0d2ec3994edcfe7" exitCode=0 Jan 29 12:30:53 crc kubenswrapper[4852]: I0129 12:30:53.988950 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" event={"ID":"ca6bd452-1ddb-4b21-bee9-b726a2e83807","Type":"ContainerDied","Data":"171771dd7c901ef81d0897158394af79e837f37781131099d0d2ec3994edcfe7"} Jan 29 12:30:55 crc kubenswrapper[4852]: I0129 12:30:55.007100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" event={"ID":"ca6bd452-1ddb-4b21-bee9-b726a2e83807","Type":"ContainerStarted","Data":"dc0ebd163257438fcc78ee5ad5ffa7b4ab14f030649ce5c01f60c4ddb1dd3190"} Jan 29 12:30:55 crc kubenswrapper[4852]: I0129 12:30:55.009328 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:30:55 crc kubenswrapper[4852]: I0129 12:30:55.035327 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" podStartSLOduration=4.035295013 podStartE2EDuration="4.035295013s" podCreationTimestamp="2026-01-29 12:30:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:30:55.030226429 +0000 UTC m=+6552.247557563" watchObservedRunningTime="2026-01-29 12:30:55.035295013 +0000 UTC m=+6552.252626187" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.068933 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.137157 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cf6ff4b45-dkm5w"] Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.137474 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" 
podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerName="dnsmasq-dns" containerID="cri-o://0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c" gracePeriod=10 Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.328618 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-645d55dd59-xntcj"] Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.336853 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.391341 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-645d55dd59-xntcj"] Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.439610 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2dql\" (UniqueName: \"kubernetes.io/projected/15e0934d-7a6d-4db7-8faa-78a576e92e58-kube-api-access-q2dql\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.439922 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-config\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.440019 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-openstack-cell1\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.440139 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-ovsdbserver-nb\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.440176 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-ovsdbserver-sb\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.441057 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-dns-svc\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.543861 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2dql\" (UniqueName: \"kubernetes.io/projected/15e0934d-7a6d-4db7-8faa-78a576e92e58-kube-api-access-q2dql\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc 
kubenswrapper[4852]: I0129 12:31:02.543965 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-config\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.544010 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-openstack-cell1\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.544041 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-ovsdbserver-nb\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.544060 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-ovsdbserver-sb\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.544173 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-dns-svc\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.545126 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-dns-svc\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.545604 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-openstack-cell1\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.545718 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-ovsdbserver-nb\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.546262 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-ovsdbserver-sb\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.547274 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/15e0934d-7a6d-4db7-8faa-78a576e92e58-config\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.588068 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2dql\" (UniqueName: \"kubernetes.io/projected/15e0934d-7a6d-4db7-8faa-78a576e92e58-kube-api-access-q2dql\") pod \"dnsmasq-dns-645d55dd59-xntcj\" (UID: \"15e0934d-7a6d-4db7-8faa-78a576e92e58\") " pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.746324 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.855445 4852 scope.go:117] "RemoveContainer" containerID="ead816478c4b0e4f70347e6fa086efaf9a54dbb6fa83883a0636d3d7aae043ef" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.922867 4852 scope.go:117] "RemoveContainer" containerID="4b0400d42bc46f4768d249663bbb5992750075cf8635716db5156b8b1b2e73b8" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.975065 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:31:02 crc kubenswrapper[4852]: I0129 12:31:02.982893 4852 scope.go:117] "RemoveContainer" containerID="a96dca572f197dbbc5f8065c85bc43ea15de1f5c1f4ac7de4881d3e089da7468" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.032454 4852 scope.go:117] "RemoveContainer" containerID="62bac8f7709e4e9cc01f07a112cd4b5dc9cc8ccfa4eeee9a1b5124a631fdafbc" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.062254 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lvx9\" (UniqueName: \"kubernetes.io/projected/fa36c18f-3bc8-414d-9768-c3d23b84ae03-kube-api-access-2lvx9\") pod \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.062371 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-nb\") pod \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.062478 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-sb\") pod \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.062660 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-config\") pod \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.062687 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-dns-svc\") pod \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\" (UID: \"fa36c18f-3bc8-414d-9768-c3d23b84ae03\") " Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.071835 4852 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa36c18f-3bc8-414d-9768-c3d23b84ae03-kube-api-access-2lvx9" (OuterVolumeSpecName: "kube-api-access-2lvx9") pod "fa36c18f-3bc8-414d-9768-c3d23b84ae03" (UID: "fa36c18f-3bc8-414d-9768-c3d23b84ae03"). InnerVolumeSpecName "kube-api-access-2lvx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.100718 4852 scope.go:117] "RemoveContainer" containerID="8dd3651d5d2a4606926e97715ab6ebd9494d8b8a21bd5c61fb93d4e6ce0863c9" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.131070 4852 generic.go:334] "Generic (PLEG): container finished" podID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerID="0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c" exitCode=0 Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.131204 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" event={"ID":"fa36c18f-3bc8-414d-9768-c3d23b84ae03","Type":"ContainerDied","Data":"0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c"} Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.131273 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" event={"ID":"fa36c18f-3bc8-414d-9768-c3d23b84ae03","Type":"ContainerDied","Data":"2a6660c41f723e28eb6bbc3c3699b203e6e2febd9dc80688d20363ccf526ed40"} Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.131290 4852 scope.go:117] "RemoveContainer" containerID="0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.132164 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cf6ff4b45-dkm5w" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.138769 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fa36c18f-3bc8-414d-9768-c3d23b84ae03" (UID: "fa36c18f-3bc8-414d-9768-c3d23b84ae03"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.161079 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fa36c18f-3bc8-414d-9768-c3d23b84ae03" (UID: "fa36c18f-3bc8-414d-9768-c3d23b84ae03"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.168560 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.168781 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lvx9\" (UniqueName: \"kubernetes.io/projected/fa36c18f-3bc8-414d-9768-c3d23b84ae03-kube-api-access-2lvx9\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.168800 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.190573 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fa36c18f-3bc8-414d-9768-c3d23b84ae03" (UID: "fa36c18f-3bc8-414d-9768-c3d23b84ae03"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.193750 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-config" (OuterVolumeSpecName: "config") pod "fa36c18f-3bc8-414d-9768-c3d23b84ae03" (UID: "fa36c18f-3bc8-414d-9768-c3d23b84ae03"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.224603 4852 scope.go:117] "RemoveContainer" containerID="5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.271094 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.271123 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa36c18f-3bc8-414d-9768-c3d23b84ae03-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.316089 4852 scope.go:117] "RemoveContainer" containerID="0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c" Jan 29 12:31:03 crc kubenswrapper[4852]: E0129 12:31:03.316886 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c\": container with ID starting with 0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c not found: ID does not exist" containerID="0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.316928 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c"} err="failed to get container status \"0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c\": rpc error: code = NotFound desc = could not find container \"0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c\": container with ID starting with 
0a6f474dd544741393d3e5216f04be869a0190057b4a4382dd6f96173c7b444c not found: ID does not exist" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.316955 4852 scope.go:117] "RemoveContainer" containerID="5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e" Jan 29 12:31:03 crc kubenswrapper[4852]: E0129 12:31:03.317387 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e\": container with ID starting with 5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e not found: ID does not exist" containerID="5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.317465 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e"} err="failed to get container status \"5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e\": rpc error: code = NotFound desc = could not find container \"5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e\": container with ID starting with 5a3dd35e2ca675f09e1f2cd4b1bb19d706cd7889d8019903296c1fdc764ab88e not found: ID does not exist" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.423155 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-645d55dd59-xntcj"] Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.485930 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:31:03 crc kubenswrapper[4852]: E0129 12:31:03.491538 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.620548 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cf6ff4b45-dkm5w"] Jan 29 12:31:03 crc kubenswrapper[4852]: I0129 12:31:03.631886 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cf6ff4b45-dkm5w"] Jan 29 12:31:04 crc kubenswrapper[4852]: I0129 12:31:04.148094 4852 generic.go:334] "Generic (PLEG): container finished" podID="15e0934d-7a6d-4db7-8faa-78a576e92e58" containerID="7d59310075f0333db0efaf4b6dd62e4824f416eac87b40167b5bfcbdb0a52fae" exitCode=0 Jan 29 12:31:04 crc kubenswrapper[4852]: I0129 12:31:04.148460 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" event={"ID":"15e0934d-7a6d-4db7-8faa-78a576e92e58","Type":"ContainerDied","Data":"7d59310075f0333db0efaf4b6dd62e4824f416eac87b40167b5bfcbdb0a52fae"} Jan 29 12:31:04 crc kubenswrapper[4852]: I0129 12:31:04.148493 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" event={"ID":"15e0934d-7a6d-4db7-8faa-78a576e92e58","Type":"ContainerStarted","Data":"e35ad064af5d04447defc1ff1b06830fba51d6b73f0e3b85d9362d9a0a6ee66e"} Jan 29 12:31:05 crc kubenswrapper[4852]: I0129 12:31:05.169503 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" 
event={"ID":"15e0934d-7a6d-4db7-8faa-78a576e92e58","Type":"ContainerStarted","Data":"992a71eae38d0fb651a797a9453d8172113fbef1e7b5799b1bbbb4d40a641fd6"} Jan 29 12:31:05 crc kubenswrapper[4852]: I0129 12:31:05.169799 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:05 crc kubenswrapper[4852]: I0129 12:31:05.196057 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" podStartSLOduration=3.196037637 podStartE2EDuration="3.196037637s" podCreationTimestamp="2026-01-29 12:31:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:05.186846363 +0000 UTC m=+6562.404177497" watchObservedRunningTime="2026-01-29 12:31:05.196037637 +0000 UTC m=+6562.413368771" Jan 29 12:31:05 crc kubenswrapper[4852]: I0129 12:31:05.475221 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" path="/var/lib/kubelet/pods/fa36c18f-3bc8-414d-9768-c3d23b84ae03/volumes" Jan 29 12:31:12 crc kubenswrapper[4852]: I0129 12:31:12.749610 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-645d55dd59-xntcj" Jan 29 12:31:12 crc kubenswrapper[4852]: I0129 12:31:12.853689 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d8bfddc7-j6zzp"] Jan 29 12:31:12 crc kubenswrapper[4852]: I0129 12:31:12.853979 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerName="dnsmasq-dns" containerID="cri-o://dc0ebd163257438fcc78ee5ad5ffa7b4ab14f030649ce5c01f60c4ddb1dd3190" gracePeriod=10 Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.268644 4852 generic.go:334] "Generic (PLEG): container finished" podID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerID="dc0ebd163257438fcc78ee5ad5ffa7b4ab14f030649ce5c01f60c4ddb1dd3190" exitCode=0 Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.268687 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" event={"ID":"ca6bd452-1ddb-4b21-bee9-b726a2e83807","Type":"ContainerDied","Data":"dc0ebd163257438fcc78ee5ad5ffa7b4ab14f030649ce5c01f60c4ddb1dd3190"} Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.568257 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.723154 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-dns-svc\") pod \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.723372 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-sb\") pod \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.723511 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvvnv\" (UniqueName: \"kubernetes.io/projected/ca6bd452-1ddb-4b21-bee9-b726a2e83807-kube-api-access-wvvnv\") pod \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.723625 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-nb\") pod \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.723722 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-config\") pod \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.723873 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-openstack-cell1\") pod \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\" (UID: \"ca6bd452-1ddb-4b21-bee9-b726a2e83807\") " Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.736064 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca6bd452-1ddb-4b21-bee9-b726a2e83807-kube-api-access-wvvnv" (OuterVolumeSpecName: "kube-api-access-wvvnv") pod "ca6bd452-1ddb-4b21-bee9-b726a2e83807" (UID: "ca6bd452-1ddb-4b21-bee9-b726a2e83807"). InnerVolumeSpecName "kube-api-access-wvvnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.784670 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ca6bd452-1ddb-4b21-bee9-b726a2e83807" (UID: "ca6bd452-1ddb-4b21-bee9-b726a2e83807"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.789362 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-config" (OuterVolumeSpecName: "config") pod "ca6bd452-1ddb-4b21-bee9-b726a2e83807" (UID: "ca6bd452-1ddb-4b21-bee9-b726a2e83807"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.798299 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "ca6bd452-1ddb-4b21-bee9-b726a2e83807" (UID: "ca6bd452-1ddb-4b21-bee9-b726a2e83807"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.805205 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ca6bd452-1ddb-4b21-bee9-b726a2e83807" (UID: "ca6bd452-1ddb-4b21-bee9-b726a2e83807"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.811223 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ca6bd452-1ddb-4b21-bee9-b726a2e83807" (UID: "ca6bd452-1ddb-4b21-bee9-b726a2e83807"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.827633 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.827672 4852 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.827688 4852 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.827704 4852 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.827715 4852 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ca6bd452-1ddb-4b21-bee9-b726a2e83807-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:13 crc kubenswrapper[4852]: I0129 12:31:13.827728 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvvnv\" (UniqueName: \"kubernetes.io/projected/ca6bd452-1ddb-4b21-bee9-b726a2e83807-kube-api-access-wvvnv\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:14 crc kubenswrapper[4852]: I0129 12:31:14.280133 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" event={"ID":"ca6bd452-1ddb-4b21-bee9-b726a2e83807","Type":"ContainerDied","Data":"259e2870f9206d94728d03e3800b18a1d2f2f97992e151cdfd522ec5bce948f5"} Jan 29 12:31:14 crc kubenswrapper[4852]: I0129 12:31:14.280171 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d8bfddc7-j6zzp" Jan 29 12:31:14 crc kubenswrapper[4852]: I0129 12:31:14.280402 4852 scope.go:117] "RemoveContainer" containerID="dc0ebd163257438fcc78ee5ad5ffa7b4ab14f030649ce5c01f60c4ddb1dd3190" Jan 29 12:31:14 crc kubenswrapper[4852]: I0129 12:31:14.309924 4852 scope.go:117] "RemoveContainer" containerID="171771dd7c901ef81d0897158394af79e837f37781131099d0d2ec3994edcfe7" Jan 29 12:31:14 crc kubenswrapper[4852]: I0129 12:31:14.313557 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d8bfddc7-j6zzp"] Jan 29 12:31:14 crc kubenswrapper[4852]: I0129 12:31:14.322762 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d8bfddc7-j6zzp"] Jan 29 12:31:15 crc kubenswrapper[4852]: I0129 12:31:15.463675 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:31:15 crc kubenswrapper[4852]: E0129 12:31:15.464152 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:31:15 crc kubenswrapper[4852]: I0129 12:31:15.476643 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" path="/var/lib/kubelet/pods/ca6bd452-1ddb-4b21-bee9-b726a2e83807/volumes" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.964943 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6"] Jan 29 12:31:23 crc kubenswrapper[4852]: E0129 12:31:23.965871 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerName="dnsmasq-dns" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.965886 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerName="dnsmasq-dns" Jan 29 12:31:23 crc kubenswrapper[4852]: E0129 12:31:23.965898 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerName="init" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.965904 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerName="init" Jan 29 12:31:23 crc kubenswrapper[4852]: E0129 12:31:23.965928 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerName="dnsmasq-dns" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.965936 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerName="dnsmasq-dns" Jan 29 12:31:23 crc kubenswrapper[4852]: E0129 12:31:23.965952 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerName="init" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.965959 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerName="init" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.966156 4852 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="ca6bd452-1ddb-4b21-bee9-b726a2e83807" containerName="dnsmasq-dns" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.966174 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa36c18f-3bc8-414d-9768-c3d23b84ae03" containerName="dnsmasq-dns" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.966933 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.970486 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.971003 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.971442 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:31:23 crc kubenswrapper[4852]: I0129 12:31:23.975025 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.000309 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6"] Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.093322 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.093539 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8zsb\" (UniqueName: \"kubernetes.io/projected/fe8d319f-c245-4fd4-87f3-2895eef499b9-kube-api-access-b8zsb\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.093709 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.093754 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.094094 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.197502 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.197664 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.197715 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.197795 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8zsb\" (UniqueName: \"kubernetes.io/projected/fe8d319f-c245-4fd4-87f3-2895eef499b9-kube-api-access-b8zsb\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.197865 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.220490 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.225500 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " 
pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.228174 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.232307 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.257702 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8zsb\" (UniqueName: \"kubernetes.io/projected/fe8d319f-c245-4fd4-87f3-2895eef499b9-kube-api-access-b8zsb\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.290075 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:31:24 crc kubenswrapper[4852]: I0129 12:31:24.876114 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6"] Jan 29 12:31:24 crc kubenswrapper[4852]: W0129 12:31:24.882271 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe8d319f_c245_4fd4_87f3_2895eef499b9.slice/crio-c78bdceef8394b9dcf1f6d0d6048d6f0dac3afeb98fe8b270f6c4fdb19b76166 WatchSource:0}: Error finding container c78bdceef8394b9dcf1f6d0d6048d6f0dac3afeb98fe8b270f6c4fdb19b76166: Status 404 returned error can't find the container with id c78bdceef8394b9dcf1f6d0d6048d6f0dac3afeb98fe8b270f6c4fdb19b76166 Jan 29 12:31:25 crc kubenswrapper[4852]: I0129 12:31:25.403889 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" event={"ID":"fe8d319f-c245-4fd4-87f3-2895eef499b9","Type":"ContainerStarted","Data":"c78bdceef8394b9dcf1f6d0d6048d6f0dac3afeb98fe8b270f6c4fdb19b76166"} Jan 29 12:31:26 crc kubenswrapper[4852]: I0129 12:31:26.464275 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:31:26 crc kubenswrapper[4852]: E0129 12:31:26.467353 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:31:30 crc kubenswrapper[4852]: I0129 12:31:30.069123 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/octavia-db-sync-wpt84"] Jan 29 12:31:30 crc kubenswrapper[4852]: I0129 12:31:30.081844 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-wpt84"] Jan 29 12:31:31 crc kubenswrapper[4852]: I0129 12:31:31.477541 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4a5ba20-d1dd-44c5-8d54-ef56e18c5006" path="/var/lib/kubelet/pods/b4a5ba20-d1dd-44c5-8d54-ef56e18c5006/volumes" Jan 29 12:31:37 crc kubenswrapper[4852]: E0129 12:31:37.536301 4852 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest" Jan 29 12:31:37 crc kubenswrapper[4852]: E0129 12:31:37.536975 4852 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 12:31:37 crc kubenswrapper[4852]: container &Container{Name:pre-adoption-validation-openstack-pre-adoption-openstack-cell1,Image:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,Command:[],Args:[ansible-runner run /runner -p osp.edpm.pre_adoption_validation -i pre-adoption-validation-openstack-pre-adoption-openstack-cell1],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_CALLBACKS_ENABLED,Value:ansible.posix.profile_tasks,ValueFrom:nil,},EnvVar{Name:ANSIBLE_CALLBACK_RESULT_FORMAT,Value:yaml,ValueFrom:nil,},EnvVar{Name:ANSIBLE_FORCE_COLOR,Value:True,ValueFrom:nil,},EnvVar{Name:ANSIBLE_DISPLAY_ARGS_TO_STDOUT,Value:True,ValueFrom:nil,},EnvVar{Name:ANSIBLE_SSH_ARGS,Value:-C -o ControlMaster=auto -o ControlPersist=80s,ValueFrom:nil,},EnvVar{Name:ANSIBLE_VERBOSITY,Value:1,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value: Jan 29 12:31:37 crc kubenswrapper[4852]: osp.edpm.pre_adoption_validation Jan 29 12:31:37 crc kubenswrapper[4852]: Jan 29 12:31:37 crc kubenswrapper[4852]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value: Jan 29 12:31:37 crc kubenswrapper[4852]: edpm_override_hosts: openstack-cell1 Jan 29 12:31:37 crc kubenswrapper[4852]: edpm_service_type: pre-adoption-validation Jan 29 12:31:37 crc kubenswrapper[4852]: edpm_services_override: [pre-adoption-validation] Jan 29 12:31:37 crc kubenswrapper[4852]: Jan 29 12:31:37 crc kubenswrapper[4852]: Jan 29 12:31:37 crc kubenswrapper[4852]: 
,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ceph,ReadOnly:true,MountPath:/etc/ceph,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:pre-adoption-validation-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/pre-adoption-validation,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key-openstack-cell1,ReadOnly:false,MountPath:/runner/env/ssh_key/ssh_key_openstack-cell1,SubPath:ssh_key_openstack-cell1,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b8zsb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6_openstack(fe8d319f-c245-4fd4-87f3-2895eef499b9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Jan 29 12:31:37 crc kubenswrapper[4852]: > logger="UnhandledError" Jan 29 12:31:37 crc kubenswrapper[4852]: E0129 12:31:37.538121 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pre-adoption-validation-openstack-pre-adoption-openstack-cell1\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" podUID="fe8d319f-c245-4fd4-87f3-2895eef499b9" Jan 29 12:31:37 crc kubenswrapper[4852]: E0129 12:31:37.556451 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pre-adoption-validation-openstack-pre-adoption-openstack-cell1\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest\\\"\"" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" podUID="fe8d319f-c245-4fd4-87f3-2895eef499b9" Jan 29 12:31:41 crc kubenswrapper[4852]: I0129 12:31:41.465285 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:31:41 crc kubenswrapper[4852]: E0129 12:31:41.466206 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:31:49 crc kubenswrapper[4852]: I0129 12:31:49.031730 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:31:49 crc kubenswrapper[4852]: I0129 12:31:49.694108 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" event={"ID":"fe8d319f-c245-4fd4-87f3-2895eef499b9","Type":"ContainerStarted","Data":"3ddfc7bd05ff59b278640f279ea350cf8570d6c48210c59d93bbee8657769b29"} Jan 29 12:31:49 crc kubenswrapper[4852]: I0129 12:31:49.713146 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" podStartSLOduration=2.568512148 podStartE2EDuration="26.713130996s" podCreationTimestamp="2026-01-29 12:31:23 +0000 UTC" firstStartedPulling="2026-01-29 12:31:24.885061654 +0000 UTC m=+6582.102392788" lastFinishedPulling="2026-01-29 12:31:49.029680502 +0000 UTC m=+6606.247011636" observedRunningTime="2026-01-29 12:31:49.711529417 +0000 UTC m=+6606.928860551" watchObservedRunningTime="2026-01-29 12:31:49.713130996 +0000 UTC m=+6606.930462120" Jan 29 12:31:55 crc kubenswrapper[4852]: I0129 12:31:55.463906 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:31:55 crc kubenswrapper[4852]: E0129 12:31:55.464810 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.407448 4852 scope.go:117] "RemoveContainer" containerID="97084f378b0bc134c9d8dbd0a24332eea492c3a9640d920b48590e19ac83d9d9" Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.435889 4852 scope.go:117] "RemoveContainer" containerID="1f4cebec076485946127332ed10da33e7ab3dedc03606ef2361c38dfb8e29246" Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.509943 4852 scope.go:117] "RemoveContainer" containerID="0f55abbd93b4226b1ebf16db1880aa58ffe2bb8d5d16d82970d78f577ca6fbbc" Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.556694 4852 scope.go:117] "RemoveContainer" containerID="d41c63a1c4f493cef337de90e1226c40bc0d79445712f98cd191c43ad5b03ade" Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.615502 4852 scope.go:117] "RemoveContainer" containerID="356a7e6bdb87dfe2fe678278c7c7e143d9514714440aaf3d981f8fd2c8a9d0af" Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.857442 4852 generic.go:334] "Generic (PLEG): container finished" podID="fe8d319f-c245-4fd4-87f3-2895eef499b9" containerID="3ddfc7bd05ff59b278640f279ea350cf8570d6c48210c59d93bbee8657769b29" exitCode=0 Jan 29 12:32:03 crc kubenswrapper[4852]: I0129 12:32:03.857485 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" event={"ID":"fe8d319f-c245-4fd4-87f3-2895eef499b9","Type":"ContainerDied","Data":"3ddfc7bd05ff59b278640f279ea350cf8570d6c48210c59d93bbee8657769b29"} Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.397211 4852 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.504326 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8zsb\" (UniqueName: \"kubernetes.io/projected/fe8d319f-c245-4fd4-87f3-2895eef499b9-kube-api-access-b8zsb\") pod \"fe8d319f-c245-4fd4-87f3-2895eef499b9\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.504902 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ssh-key-openstack-cell1\") pod \"fe8d319f-c245-4fd4-87f3-2895eef499b9\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.504967 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-pre-adoption-validation-combined-ca-bundle\") pod \"fe8d319f-c245-4fd4-87f3-2895eef499b9\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.505144 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-inventory\") pod \"fe8d319f-c245-4fd4-87f3-2895eef499b9\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.505194 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ceph\") pod \"fe8d319f-c245-4fd4-87f3-2895eef499b9\" (UID: \"fe8d319f-c245-4fd4-87f3-2895eef499b9\") " Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.511066 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ceph" (OuterVolumeSpecName: "ceph") pod "fe8d319f-c245-4fd4-87f3-2895eef499b9" (UID: "fe8d319f-c245-4fd4-87f3-2895eef499b9"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.511474 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe8d319f-c245-4fd4-87f3-2895eef499b9-kube-api-access-b8zsb" (OuterVolumeSpecName: "kube-api-access-b8zsb") pod "fe8d319f-c245-4fd4-87f3-2895eef499b9" (UID: "fe8d319f-c245-4fd4-87f3-2895eef499b9"). InnerVolumeSpecName "kube-api-access-b8zsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.519542 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "fe8d319f-c245-4fd4-87f3-2895eef499b9" (UID: "fe8d319f-c245-4fd4-87f3-2895eef499b9"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.537297 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-inventory" (OuterVolumeSpecName: "inventory") pod "fe8d319f-c245-4fd4-87f3-2895eef499b9" (UID: "fe8d319f-c245-4fd4-87f3-2895eef499b9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.549348 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "fe8d319f-c245-4fd4-87f3-2895eef499b9" (UID: "fe8d319f-c245-4fd4-87f3-2895eef499b9"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.607968 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.607999 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.608009 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8zsb\" (UniqueName: \"kubernetes.io/projected/fe8d319f-c245-4fd4-87f3-2895eef499b9-kube-api-access-b8zsb\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.608019 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.608029 4852 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe8d319f-c245-4fd4-87f3-2895eef499b9-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.891550 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" event={"ID":"fe8d319f-c245-4fd4-87f3-2895eef499b9","Type":"ContainerDied","Data":"c78bdceef8394b9dcf1f6d0d6048d6f0dac3afeb98fe8b270f6c4fdb19b76166"} Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.891724 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c78bdceef8394b9dcf1f6d0d6048d6f0dac3afeb98fe8b270f6c4fdb19b76166" Jan 29 12:32:05 crc kubenswrapper[4852]: I0129 12:32:05.891658 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6" Jan 29 12:32:06 crc kubenswrapper[4852]: I0129 12:32:06.463995 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:32:06 crc kubenswrapper[4852]: E0129 12:32:06.464773 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.042517 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h"] Jan 29 12:32:07 crc kubenswrapper[4852]: E0129 12:32:07.043333 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe8d319f-c245-4fd4-87f3-2895eef499b9" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.043369 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe8d319f-c245-4fd4-87f3-2895eef499b9" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.043849 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe8d319f-c245-4fd4-87f3-2895eef499b9" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.045242 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.048235 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.049296 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.049674 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.052941 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.055532 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h"] Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.154373 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.154448 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.154601 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mxmb\" (UniqueName: \"kubernetes.io/projected/6a211413-55d7-4f37-a2ea-c452ecba4bcc-kube-api-access-7mxmb\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.154706 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.154775 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.257195 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-inventory\") pod 
\"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.257280 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.257366 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.257448 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.257651 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mxmb\" (UniqueName: \"kubernetes.io/projected/6a211413-55d7-4f37-a2ea-c452ecba4bcc-kube-api-access-7mxmb\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.262958 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.267575 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.267952 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.275433 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-inventory\") pod 
\"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.283987 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mxmb\" (UniqueName: \"kubernetes.io/projected/6a211413-55d7-4f37-a2ea-c452ecba4bcc-kube-api-access-7mxmb\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.368775 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:32:07 crc kubenswrapper[4852]: I0129 12:32:07.973567 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h"] Jan 29 12:32:08 crc kubenswrapper[4852]: I0129 12:32:08.920316 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" event={"ID":"6a211413-55d7-4f37-a2ea-c452ecba4bcc","Type":"ContainerStarted","Data":"5dbacd627e5a831202fbefb51661c504dcca2b758436cd63420f1e0aa9e5a27e"} Jan 29 12:32:09 crc kubenswrapper[4852]: I0129 12:32:09.930495 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" event={"ID":"6a211413-55d7-4f37-a2ea-c452ecba4bcc","Type":"ContainerStarted","Data":"9bb8b6bf44a059cbe46af70792e242ee058c584cffff1a235bc7acf692cafd17"} Jan 29 12:32:09 crc kubenswrapper[4852]: I0129 12:32:09.952921 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" podStartSLOduration=1.426517113 podStartE2EDuration="2.952898627s" podCreationTimestamp="2026-01-29 12:32:07 +0000 UTC" firstStartedPulling="2026-01-29 12:32:07.989741366 +0000 UTC m=+6625.207072500" lastFinishedPulling="2026-01-29 12:32:09.51612284 +0000 UTC m=+6626.733454014" observedRunningTime="2026-01-29 12:32:09.949486764 +0000 UTC m=+6627.166817918" watchObservedRunningTime="2026-01-29 12:32:09.952898627 +0000 UTC m=+6627.170229761" Jan 29 12:32:21 crc kubenswrapper[4852]: I0129 12:32:21.463484 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:32:21 crc kubenswrapper[4852]: E0129 12:32:21.464374 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:32:36 crc kubenswrapper[4852]: I0129 12:32:36.463727 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:32:37 crc kubenswrapper[4852]: I0129 12:32:37.237281 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"290e38974dcaec9c38148bf2f96f955bfe0835518ea91d8ce662210b9d779fb6"} Jan 29 
12:34:56 crc kubenswrapper[4852]: I0129 12:34:56.045031 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-1039-account-create-update-887rr"] Jan 29 12:34:56 crc kubenswrapper[4852]: I0129 12:34:56.055753 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-ckqhp"] Jan 29 12:34:56 crc kubenswrapper[4852]: I0129 12:34:56.065932 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-1039-account-create-update-887rr"] Jan 29 12:34:56 crc kubenswrapper[4852]: I0129 12:34:56.074021 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-ckqhp"] Jan 29 12:34:57 crc kubenswrapper[4852]: I0129 12:34:57.481470 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c48eb14d-cbad-4154-a588-7271c69721af" path="/var/lib/kubelet/pods/c48eb14d-cbad-4154-a588-7271c69721af/volumes" Jan 29 12:34:57 crc kubenswrapper[4852]: I0129 12:34:57.482908 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8d97fec-53d7-4bb5-95ee-665b47fb821e" path="/var/lib/kubelet/pods/d8d97fec-53d7-4bb5-95ee-665b47fb821e/volumes" Jan 29 12:35:00 crc kubenswrapper[4852]: I0129 12:35:00.018881 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:35:00 crc kubenswrapper[4852]: I0129 12:35:00.019235 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:35:03 crc kubenswrapper[4852]: I0129 12:35:03.847322 4852 scope.go:117] "RemoveContainer" containerID="ea85d8a31b5145744c386a68641cbbd734d18fa7b09c82f25f388f12094d81bb" Jan 29 12:35:03 crc kubenswrapper[4852]: I0129 12:35:03.894004 4852 scope.go:117] "RemoveContainer" containerID="85ce1cec0657b57ab134b72efe8d35272e18575cfc625fc0e1811ce901743956" Jan 29 12:35:10 crc kubenswrapper[4852]: I0129 12:35:10.059762 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-tppch"] Jan 29 12:35:10 crc kubenswrapper[4852]: I0129 12:35:10.070822 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-tppch"] Jan 29 12:35:11 crc kubenswrapper[4852]: I0129 12:35:11.474982 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd41f0b6-b8cb-489a-89a1-9daa4f8f881d" path="/var/lib/kubelet/pods/fd41f0b6-b8cb-489a-89a1-9daa4f8f881d/volumes" Jan 29 12:35:30 crc kubenswrapper[4852]: I0129 12:35:30.017538 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:35:30 crc kubenswrapper[4852]: I0129 12:35:30.018193 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Jan 29 12:35:49 crc kubenswrapper[4852]: I0129 12:35:49.927944 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k72r7"] Jan 29 12:35:49 crc kubenswrapper[4852]: I0129 12:35:49.932189 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:49 crc kubenswrapper[4852]: I0129 12:35:49.962043 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k72r7"] Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.015978 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-utilities\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.016302 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqvrh\" (UniqueName: \"kubernetes.io/projected/aa80c707-37d9-448c-9b5f-b318b0885cf5-kube-api-access-bqvrh\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.016651 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-catalog-content\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.118190 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-catalog-content\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.118281 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-utilities\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.118326 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqvrh\" (UniqueName: \"kubernetes.io/projected/aa80c707-37d9-448c-9b5f-b318b0885cf5-kube-api-access-bqvrh\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.118942 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-catalog-content\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.119056 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-utilities\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.152685 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqvrh\" (UniqueName: \"kubernetes.io/projected/aa80c707-37d9-448c-9b5f-b318b0885cf5-kube-api-access-bqvrh\") pod \"community-operators-k72r7\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.263722 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:35:50 crc kubenswrapper[4852]: I0129 12:35:50.764371 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k72r7"] Jan 29 12:35:51 crc kubenswrapper[4852]: I0129 12:35:51.372848 4852 generic.go:334] "Generic (PLEG): container finished" podID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerID="1e630816739a55688fb27c7d9530a9406984f962b5fd391184babbb85fcfc768" exitCode=0 Jan 29 12:35:51 crc kubenswrapper[4852]: I0129 12:35:51.372907 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerDied","Data":"1e630816739a55688fb27c7d9530a9406984f962b5fd391184babbb85fcfc768"} Jan 29 12:35:51 crc kubenswrapper[4852]: I0129 12:35:51.373132 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerStarted","Data":"58f08ce060d76d8768492e3717ead3bb44bbe719027afd597a91ce30bb44a89e"} Jan 29 12:35:51 crc kubenswrapper[4852]: I0129 12:35:51.376032 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:35:52 crc kubenswrapper[4852]: I0129 12:35:52.385871 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerStarted","Data":"266d54340cf8079acfac5ec5a4afe983e47a6f46c8462bbd382a5060da81d6b8"} Jan 29 12:35:54 crc kubenswrapper[4852]: I0129 12:35:54.409427 4852 generic.go:334] "Generic (PLEG): container finished" podID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerID="266d54340cf8079acfac5ec5a4afe983e47a6f46c8462bbd382a5060da81d6b8" exitCode=0 Jan 29 12:35:54 crc kubenswrapper[4852]: I0129 12:35:54.409521 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerDied","Data":"266d54340cf8079acfac5ec5a4afe983e47a6f46c8462bbd382a5060da81d6b8"} Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.273357 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-smt52"] Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.277545 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.307419 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-smt52"] Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.430203 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerStarted","Data":"c63482860a3f7f1e29dbace89ca7a4f906268f2be2ef9b47f872d666c374ec2f"} Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.451523 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k72r7" podStartSLOduration=3.5619270800000002 podStartE2EDuration="7.451505057s" podCreationTimestamp="2026-01-29 12:35:49 +0000 UTC" firstStartedPulling="2026-01-29 12:35:51.375767272 +0000 UTC m=+6848.593098406" lastFinishedPulling="2026-01-29 12:35:55.265345249 +0000 UTC m=+6852.482676383" observedRunningTime="2026-01-29 12:35:56.448986915 +0000 UTC m=+6853.666318049" watchObservedRunningTime="2026-01-29 12:35:56.451505057 +0000 UTC m=+6853.668836191" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.460779 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2bc6\" (UniqueName: \"kubernetes.io/projected/60b7b1fc-ae6e-4b79-8202-661e06189e73-kube-api-access-w2bc6\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.460885 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-catalog-content\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.460922 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-utilities\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.564268 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2bc6\" (UniqueName: \"kubernetes.io/projected/60b7b1fc-ae6e-4b79-8202-661e06189e73-kube-api-access-w2bc6\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.564420 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-catalog-content\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.564464 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-utilities\") pod \"redhat-operators-smt52\" (UID: 
\"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.565472 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-utilities\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.565516 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-catalog-content\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.586450 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2bc6\" (UniqueName: \"kubernetes.io/projected/60b7b1fc-ae6e-4b79-8202-661e06189e73-kube-api-access-w2bc6\") pod \"redhat-operators-smt52\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:56 crc kubenswrapper[4852]: I0129 12:35:56.612672 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:35:57 crc kubenswrapper[4852]: I0129 12:35:57.185558 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-smt52"] Jan 29 12:35:57 crc kubenswrapper[4852]: I0129 12:35:57.443577 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerStarted","Data":"ff1cb6d0af7c2606aa3f98b7a0fd7cd60d389ebccfb0c9a7a67c4f92794a31ab"} Jan 29 12:35:58 crc kubenswrapper[4852]: I0129 12:35:58.456442 4852 generic.go:334] "Generic (PLEG): container finished" podID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerID="7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0" exitCode=0 Jan 29 12:35:58 crc kubenswrapper[4852]: I0129 12:35:58.456764 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerDied","Data":"7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0"} Jan 29 12:35:59 crc kubenswrapper[4852]: I0129 12:35:59.478401 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerStarted","Data":"4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a"} Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.017332 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.017390 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.017438 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.018643 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"290e38974dcaec9c38148bf2f96f955bfe0835518ea91d8ce662210b9d779fb6"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.018694 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://290e38974dcaec9c38148bf2f96f955bfe0835518ea91d8ce662210b9d779fb6" gracePeriod=600 Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.264972 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.265259 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.482652 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="290e38974dcaec9c38148bf2f96f955bfe0835518ea91d8ce662210b9d779fb6" exitCode=0 Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.482696 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"290e38974dcaec9c38148bf2f96f955bfe0835518ea91d8ce662210b9d779fb6"} Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.482743 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63"} Jan 29 12:36:00 crc kubenswrapper[4852]: I0129 12:36:00.482762 4852 scope.go:117] "RemoveContainer" containerID="12e5535e93b0294d363d8be038de6a5a8eb7a28a7c1b9e5b9296252a9187e751" Jan 29 12:36:01 crc kubenswrapper[4852]: I0129 12:36:01.391824 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-k72r7" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="registry-server" probeResult="failure" output=< Jan 29 12:36:01 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:36:01 crc kubenswrapper[4852]: > Jan 29 12:36:03 crc kubenswrapper[4852]: I0129 12:36:03.983694 4852 scope.go:117] "RemoveContainer" containerID="d02d7c6a1b9376ce7b946e455057c54c8865e649823a32179521c8249cf4afb5" Jan 29 12:36:07 crc kubenswrapper[4852]: I0129 12:36:07.565109 4852 generic.go:334] "Generic (PLEG): container finished" podID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerID="4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a" exitCode=0 Jan 29 12:36:07 crc kubenswrapper[4852]: I0129 12:36:07.565170 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerDied","Data":"4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a"} Jan 29 12:36:09 crc kubenswrapper[4852]: I0129 12:36:09.595182 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerStarted","Data":"d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a"} Jan 29 12:36:09 crc kubenswrapper[4852]: I0129 12:36:09.625884 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-smt52" podStartSLOduration=3.735687629 podStartE2EDuration="13.625866184s" podCreationTimestamp="2026-01-29 12:35:56 +0000 UTC" firstStartedPulling="2026-01-29 12:35:58.458514045 +0000 UTC m=+6855.675845179" lastFinishedPulling="2026-01-29 12:36:08.3486926 +0000 UTC m=+6865.566023734" observedRunningTime="2026-01-29 12:36:09.621112089 +0000 UTC m=+6866.838443223" watchObservedRunningTime="2026-01-29 12:36:09.625866184 +0000 UTC m=+6866.843197318" Jan 29 12:36:11 crc kubenswrapper[4852]: I0129 12:36:11.311791 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-k72r7" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="registry-server" probeResult="failure" output=< Jan 29 12:36:11 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:36:11 crc kubenswrapper[4852]: > Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.467766 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b8qvz"] Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.472829 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.488469 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8qvz"] Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.612819 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.612878 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.621692 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-utilities\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.621806 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-catalog-content\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.621876 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2jkk\" (UniqueName: \"kubernetes.io/projected/49c8ba28-2ebb-417c-b777-74513e9362a6-kube-api-access-t2jkk\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.724547 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-utilities\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.724618 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-catalog-content\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.724685 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2jkk\" (UniqueName: \"kubernetes.io/projected/49c8ba28-2ebb-417c-b777-74513e9362a6-kube-api-access-t2jkk\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.725179 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-catalog-content\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.725196 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-utilities\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.746130 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2jkk\" (UniqueName: \"kubernetes.io/projected/49c8ba28-2ebb-417c-b777-74513e9362a6-kube-api-access-t2jkk\") pod \"redhat-marketplace-b8qvz\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:16 crc kubenswrapper[4852]: I0129 12:36:16.793741 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:17 crc kubenswrapper[4852]: I0129 12:36:17.405837 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8qvz"] Jan 29 12:36:17 crc kubenswrapper[4852]: W0129 12:36:17.414437 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49c8ba28_2ebb_417c_b777_74513e9362a6.slice/crio-cefe0638a103b449f64d72dee9ab139a107e8f0a7a89ca0e85aec4e90122263d WatchSource:0}: Error finding container cefe0638a103b449f64d72dee9ab139a107e8f0a7a89ca0e85aec4e90122263d: Status 404 returned error can't find the container with id cefe0638a103b449f64d72dee9ab139a107e8f0a7a89ca0e85aec4e90122263d Jan 29 12:36:17 crc kubenswrapper[4852]: I0129 12:36:17.661912 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-smt52" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" probeResult="failure" output=< Jan 29 12:36:17 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:36:17 crc kubenswrapper[4852]: > Jan 29 12:36:17 crc kubenswrapper[4852]: I0129 12:36:17.673356 4852 generic.go:334] "Generic (PLEG): container finished" podID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerID="ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd" exitCode=0 Jan 29 12:36:17 crc kubenswrapper[4852]: I0129 12:36:17.673395 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerDied","Data":"ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd"} Jan 29 12:36:17 crc kubenswrapper[4852]: I0129 12:36:17.673426 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerStarted","Data":"cefe0638a103b449f64d72dee9ab139a107e8f0a7a89ca0e85aec4e90122263d"} Jan 29 12:36:19 crc kubenswrapper[4852]: I0129 12:36:19.695931 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerStarted","Data":"0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12"} Jan 29 12:36:20 crc kubenswrapper[4852]: I0129 12:36:20.320137 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:36:20 crc kubenswrapper[4852]: I0129 12:36:20.382539 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:36:20 crc kubenswrapper[4852]: I0129 12:36:20.710556 4852 generic.go:334] "Generic (PLEG): container finished" podID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerID="0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12" exitCode=0 Jan 29 12:36:20 crc kubenswrapper[4852]: I0129 12:36:20.711191 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerDied","Data":"0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12"} Jan 29 12:36:21 crc kubenswrapper[4852]: I0129 12:36:21.728869 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerStarted","Data":"fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7"} Jan 29 12:36:21 crc kubenswrapper[4852]: I0129 12:36:21.751912 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b8qvz" podStartSLOduration=2.277569311 podStartE2EDuration="5.751888004s" podCreationTimestamp="2026-01-29 12:36:16 +0000 UTC" firstStartedPulling="2026-01-29 12:36:17.675041566 +0000 UTC m=+6874.892372700" lastFinishedPulling="2026-01-29 12:36:21.149360259 +0000 UTC m=+6878.366691393" observedRunningTime="2026-01-29 12:36:21.745246962 +0000 UTC m=+6878.962578106" watchObservedRunningTime="2026-01-29 12:36:21.751888004 +0000 UTC m=+6878.969219138" Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.253785 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k72r7"] Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.254292 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k72r7" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="registry-server" containerID="cri-o://c63482860a3f7f1e29dbace89ca7a4f906268f2be2ef9b47f872d666c374ec2f" gracePeriod=2 Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.750758 4852 generic.go:334] "Generic (PLEG): container finished" podID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerID="c63482860a3f7f1e29dbace89ca7a4f906268f2be2ef9b47f872d666c374ec2f" exitCode=0 Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.751666 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerDied","Data":"c63482860a3f7f1e29dbace89ca7a4f906268f2be2ef9b47f872d666c374ec2f"} Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.898143 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.973486 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqvrh\" (UniqueName: \"kubernetes.io/projected/aa80c707-37d9-448c-9b5f-b318b0885cf5-kube-api-access-bqvrh\") pod \"aa80c707-37d9-448c-9b5f-b318b0885cf5\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.973621 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-catalog-content\") pod \"aa80c707-37d9-448c-9b5f-b318b0885cf5\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.973799 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-utilities\") pod \"aa80c707-37d9-448c-9b5f-b318b0885cf5\" (UID: \"aa80c707-37d9-448c-9b5f-b318b0885cf5\") " Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.976065 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-utilities" (OuterVolumeSpecName: "utilities") pod "aa80c707-37d9-448c-9b5f-b318b0885cf5" (UID: "aa80c707-37d9-448c-9b5f-b318b0885cf5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:36:22 crc kubenswrapper[4852]: I0129 12:36:22.986095 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa80c707-37d9-448c-9b5f-b318b0885cf5-kube-api-access-bqvrh" (OuterVolumeSpecName: "kube-api-access-bqvrh") pod "aa80c707-37d9-448c-9b5f-b318b0885cf5" (UID: "aa80c707-37d9-448c-9b5f-b318b0885cf5"). InnerVolumeSpecName "kube-api-access-bqvrh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.045749 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aa80c707-37d9-448c-9b5f-b318b0885cf5" (UID: "aa80c707-37d9-448c-9b5f-b318b0885cf5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.076810 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.076848 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqvrh\" (UniqueName: \"kubernetes.io/projected/aa80c707-37d9-448c-9b5f-b318b0885cf5-kube-api-access-bqvrh\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.076859 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa80c707-37d9-448c-9b5f-b318b0885cf5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.764771 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k72r7" event={"ID":"aa80c707-37d9-448c-9b5f-b318b0885cf5","Type":"ContainerDied","Data":"58f08ce060d76d8768492e3717ead3bb44bbe719027afd597a91ce30bb44a89e"} Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.764840 4852 scope.go:117] "RemoveContainer" containerID="c63482860a3f7f1e29dbace89ca7a4f906268f2be2ef9b47f872d666c374ec2f" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.764870 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k72r7" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.796995 4852 scope.go:117] "RemoveContainer" containerID="266d54340cf8079acfac5ec5a4afe983e47a6f46c8462bbd382a5060da81d6b8" Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.798361 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k72r7"] Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.810619 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k72r7"] Jan 29 12:36:23 crc kubenswrapper[4852]: I0129 12:36:23.821037 4852 scope.go:117] "RemoveContainer" containerID="1e630816739a55688fb27c7d9530a9406984f962b5fd391184babbb85fcfc768" Jan 29 12:36:25 crc kubenswrapper[4852]: I0129 12:36:25.481341 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" path="/var/lib/kubelet/pods/aa80c707-37d9-448c-9b5f-b318b0885cf5/volumes" Jan 29 12:36:26 crc kubenswrapper[4852]: I0129 12:36:26.794508 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:26 crc kubenswrapper[4852]: I0129 12:36:26.795641 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:26 crc kubenswrapper[4852]: I0129 12:36:26.841958 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:27 crc kubenswrapper[4852]: I0129 12:36:27.663783 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-smt52" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" probeResult="failure" output=< Jan 29 12:36:27 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:36:27 crc kubenswrapper[4852]: > Jan 29 12:36:27 crc kubenswrapper[4852]: I0129 
12:36:27.873965 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:28 crc kubenswrapper[4852]: I0129 12:36:28.659143 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8qvz"] Jan 29 12:36:29 crc kubenswrapper[4852]: I0129 12:36:29.835849 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b8qvz" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="registry-server" containerID="cri-o://fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7" gracePeriod=2 Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.352470 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.442269 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-utilities\") pod \"49c8ba28-2ebb-417c-b777-74513e9362a6\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.442432 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2jkk\" (UniqueName: \"kubernetes.io/projected/49c8ba28-2ebb-417c-b777-74513e9362a6-kube-api-access-t2jkk\") pod \"49c8ba28-2ebb-417c-b777-74513e9362a6\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.442493 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-catalog-content\") pod \"49c8ba28-2ebb-417c-b777-74513e9362a6\" (UID: \"49c8ba28-2ebb-417c-b777-74513e9362a6\") " Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.447458 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c8ba28-2ebb-417c-b777-74513e9362a6-kube-api-access-t2jkk" (OuterVolumeSpecName: "kube-api-access-t2jkk") pod "49c8ba28-2ebb-417c-b777-74513e9362a6" (UID: "49c8ba28-2ebb-417c-b777-74513e9362a6"). InnerVolumeSpecName "kube-api-access-t2jkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.450190 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-utilities" (OuterVolumeSpecName: "utilities") pod "49c8ba28-2ebb-417c-b777-74513e9362a6" (UID: "49c8ba28-2ebb-417c-b777-74513e9362a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.469533 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "49c8ba28-2ebb-417c-b777-74513e9362a6" (UID: "49c8ba28-2ebb-417c-b777-74513e9362a6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.546068 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.546096 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2jkk\" (UniqueName: \"kubernetes.io/projected/49c8ba28-2ebb-417c-b777-74513e9362a6-kube-api-access-t2jkk\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.546106 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49c8ba28-2ebb-417c-b777-74513e9362a6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.852827 4852 generic.go:334] "Generic (PLEG): container finished" podID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerID="fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7" exitCode=0 Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.852878 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerDied","Data":"fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7"} Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.852907 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b8qvz" event={"ID":"49c8ba28-2ebb-417c-b777-74513e9362a6","Type":"ContainerDied","Data":"cefe0638a103b449f64d72dee9ab139a107e8f0a7a89ca0e85aec4e90122263d"} Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.852922 4852 scope.go:117] "RemoveContainer" containerID="fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.853079 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b8qvz" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.879191 4852 scope.go:117] "RemoveContainer" containerID="0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.908478 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8qvz"] Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.927601 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b8qvz"] Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.930796 4852 scope.go:117] "RemoveContainer" containerID="ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.962611 4852 scope.go:117] "RemoveContainer" containerID="fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7" Jan 29 12:36:30 crc kubenswrapper[4852]: E0129 12:36:30.962976 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7\": container with ID starting with fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7 not found: ID does not exist" containerID="fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.963008 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7"} err="failed to get container status \"fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7\": rpc error: code = NotFound desc = could not find container \"fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7\": container with ID starting with fc23ed5a64b9f6c21877dc3b8438e87835cbd3c4bc56185c9f556a0e5f13cab7 not found: ID does not exist" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.963028 4852 scope.go:117] "RemoveContainer" containerID="0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12" Jan 29 12:36:30 crc kubenswrapper[4852]: E0129 12:36:30.963252 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12\": container with ID starting with 0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12 not found: ID does not exist" containerID="0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.963277 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12"} err="failed to get container status \"0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12\": rpc error: code = NotFound desc = could not find container \"0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12\": container with ID starting with 0c134712f08cda542f16749a432b2948429763b0940d4e90333712c97f7b7b12 not found: ID does not exist" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.963292 4852 scope.go:117] "RemoveContainer" containerID="ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd" Jan 29 12:36:30 crc kubenswrapper[4852]: E0129 12:36:30.963531 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd\": container with ID starting with ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd not found: ID does not exist" containerID="ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd" Jan 29 12:36:30 crc kubenswrapper[4852]: I0129 12:36:30.963554 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd"} err="failed to get container status \"ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd\": rpc error: code = NotFound desc = could not find container \"ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd\": container with ID starting with ecbf2dc497c84c5c928a49f81452aba9afae2557159c5202b515de5c3aca06bd not found: ID does not exist" Jan 29 12:36:31 crc kubenswrapper[4852]: I0129 12:36:31.475761 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" path="/var/lib/kubelet/pods/49c8ba28-2ebb-417c-b777-74513e9362a6/volumes" Jan 29 12:36:37 crc kubenswrapper[4852]: I0129 12:36:37.664295 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-smt52" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" probeResult="failure" output=< Jan 29 12:36:37 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:36:37 crc kubenswrapper[4852]: > Jan 29 12:36:46 crc kubenswrapper[4852]: I0129 12:36:46.686907 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:36:46 crc kubenswrapper[4852]: I0129 12:36:46.748047 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:36:47 crc kubenswrapper[4852]: I0129 12:36:47.668921 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-smt52"] Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.049737 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-smt52" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" containerID="cri-o://d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a" gracePeriod=2 Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.600488 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.678730 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-utilities\") pod \"60b7b1fc-ae6e-4b79-8202-661e06189e73\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.678882 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2bc6\" (UniqueName: \"kubernetes.io/projected/60b7b1fc-ae6e-4b79-8202-661e06189e73-kube-api-access-w2bc6\") pod \"60b7b1fc-ae6e-4b79-8202-661e06189e73\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.678947 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-catalog-content\") pod \"60b7b1fc-ae6e-4b79-8202-661e06189e73\" (UID: \"60b7b1fc-ae6e-4b79-8202-661e06189e73\") " Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.679774 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-utilities" (OuterVolumeSpecName: "utilities") pod "60b7b1fc-ae6e-4b79-8202-661e06189e73" (UID: "60b7b1fc-ae6e-4b79-8202-661e06189e73"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.680234 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.692388 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60b7b1fc-ae6e-4b79-8202-661e06189e73-kube-api-access-w2bc6" (OuterVolumeSpecName: "kube-api-access-w2bc6") pod "60b7b1fc-ae6e-4b79-8202-661e06189e73" (UID: "60b7b1fc-ae6e-4b79-8202-661e06189e73"). InnerVolumeSpecName "kube-api-access-w2bc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.782697 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2bc6\" (UniqueName: \"kubernetes.io/projected/60b7b1fc-ae6e-4b79-8202-661e06189e73-kube-api-access-w2bc6\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.823236 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "60b7b1fc-ae6e-4b79-8202-661e06189e73" (UID: "60b7b1fc-ae6e-4b79-8202-661e06189e73"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:36:48 crc kubenswrapper[4852]: I0129 12:36:48.884984 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60b7b1fc-ae6e-4b79-8202-661e06189e73-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.078436 4852 generic.go:334] "Generic (PLEG): container finished" podID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerID="d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a" exitCode=0 Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.079308 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-smt52" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.079344 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerDied","Data":"d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a"} Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.086912 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-smt52" event={"ID":"60b7b1fc-ae6e-4b79-8202-661e06189e73","Type":"ContainerDied","Data":"ff1cb6d0af7c2606aa3f98b7a0fd7cd60d389ebccfb0c9a7a67c4f92794a31ab"} Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.086989 4852 scope.go:117] "RemoveContainer" containerID="d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.111105 4852 scope.go:117] "RemoveContainer" containerID="4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.138511 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-smt52"] Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.147147 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-smt52"] Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.158339 4852 scope.go:117] "RemoveContainer" containerID="7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.198532 4852 scope.go:117] "RemoveContainer" containerID="d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a" Jan 29 12:36:49 crc kubenswrapper[4852]: E0129 12:36:49.199073 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a\": container with ID starting with d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a not found: ID does not exist" containerID="d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.199164 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a"} err="failed to get container status \"d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a\": rpc error: code = NotFound desc = could not find container \"d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a\": container with ID starting with d593b2ad421cdb80ee09035acdadbb61df10e7c557a758e449b16738c71e533a not found: ID does not exist" Jan 29 12:36:49 crc 
kubenswrapper[4852]: I0129 12:36:49.199238 4852 scope.go:117] "RemoveContainer" containerID="4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a" Jan 29 12:36:49 crc kubenswrapper[4852]: E0129 12:36:49.199717 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a\": container with ID starting with 4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a not found: ID does not exist" containerID="4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.199814 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a"} err="failed to get container status \"4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a\": rpc error: code = NotFound desc = could not find container \"4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a\": container with ID starting with 4404aa0baf02ab72e1ff0619fbe2287b9cf125baf9233eae9ea069894b82053a not found: ID does not exist" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.199877 4852 scope.go:117] "RemoveContainer" containerID="7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0" Jan 29 12:36:49 crc kubenswrapper[4852]: E0129 12:36:49.200277 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0\": container with ID starting with 7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0 not found: ID does not exist" containerID="7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.200317 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0"} err="failed to get container status \"7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0\": rpc error: code = NotFound desc = could not find container \"7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0\": container with ID starting with 7619e7730a3a85d8deadeea4001ca08d6d47078c745e62c398cfacbcdf8900b0 not found: ID does not exist" Jan 29 12:36:49 crc kubenswrapper[4852]: I0129 12:36:49.477273 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" path="/var/lib/kubelet/pods/60b7b1fc-ae6e-4b79-8202-661e06189e73/volumes" Jan 29 12:38:00 crc kubenswrapper[4852]: I0129 12:38:00.017979 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:38:00 crc kubenswrapper[4852]: I0129 12:38:00.018808 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:38:08 crc kubenswrapper[4852]: I0129 12:38:08.042191 4852 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/aodh-db-create-m7s79"] Jan 29 12:38:08 crc kubenswrapper[4852]: I0129 12:38:08.056548 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-m7s79"] Jan 29 12:38:09 crc kubenswrapper[4852]: I0129 12:38:09.477545 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5a6620e-7c4b-40e5-b818-b44c5d5c123d" path="/var/lib/kubelet/pods/b5a6620e-7c4b-40e5-b818-b44c5d5c123d/volumes" Jan 29 12:38:10 crc kubenswrapper[4852]: I0129 12:38:10.031026 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-65d0-account-create-update-plgxr"] Jan 29 12:38:10 crc kubenswrapper[4852]: I0129 12:38:10.039206 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-65d0-account-create-update-plgxr"] Jan 29 12:38:11 crc kubenswrapper[4852]: I0129 12:38:11.480182 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a424b366-8e6d-47d8-818d-7cd5ac80081f" path="/var/lib/kubelet/pods/a424b366-8e6d-47d8-818d-7cd5ac80081f/volumes" Jan 29 12:38:30 crc kubenswrapper[4852]: I0129 12:38:30.017625 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:38:30 crc kubenswrapper[4852]: I0129 12:38:30.018152 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:38:39 crc kubenswrapper[4852]: I0129 12:38:39.040857 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-mc5d6"] Jan 29 12:38:39 crc kubenswrapper[4852]: I0129 12:38:39.052696 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-mc5d6"] Jan 29 12:38:39 crc kubenswrapper[4852]: I0129 12:38:39.474667 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3350cdb-cb7e-412b-8638-9ea88093fc95" path="/var/lib/kubelet/pods/f3350cdb-cb7e-412b-8638-9ea88093fc95/volumes" Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.017257 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.017750 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.017794 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.018443 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.018492 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" gracePeriod=600 Jan 29 12:39:00 crc kubenswrapper[4852]: E0129 12:39:00.143494 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.446141 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" exitCode=0 Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.446198 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63"} Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.446239 4852 scope.go:117] "RemoveContainer" containerID="290e38974dcaec9c38148bf2f96f955bfe0835518ea91d8ce662210b9d779fb6" Jan 29 12:39:00 crc kubenswrapper[4852]: I0129 12:39:00.447098 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:39:00 crc kubenswrapper[4852]: E0129 12:39:00.447535 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:39:04 crc kubenswrapper[4852]: I0129 12:39:04.155723 4852 scope.go:117] "RemoveContainer" containerID="762141e5200b19617f328f1e6ca0b09983fa76bf45951f60619eb089758684fe" Jan 29 12:39:04 crc kubenswrapper[4852]: I0129 12:39:04.187415 4852 scope.go:117] "RemoveContainer" containerID="03d17b6866fc3463a74a364a92a8caba0d4b39328eee17c56b4d0454b7fff98e" Jan 29 12:39:04 crc kubenswrapper[4852]: I0129 12:39:04.245928 4852 scope.go:117] "RemoveContainer" containerID="8ca0135e77f98709c3eea226fdf1f287a43f8a18664cead78192f6272a51508b" Jan 29 12:39:13 crc kubenswrapper[4852]: I0129 12:39:13.474085 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:39:13 crc kubenswrapper[4852]: E0129 12:39:13.474945 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:39:16 crc kubenswrapper[4852]: I0129 12:39:16.057017 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-pdxw5"] Jan 29 12:39:16 crc kubenswrapper[4852]: I0129 12:39:16.071854 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-1c50-account-create-update-l8bdj"] Jan 29 12:39:16 crc kubenswrapper[4852]: I0129 12:39:16.082677 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-1c50-account-create-update-l8bdj"] Jan 29 12:39:16 crc kubenswrapper[4852]: I0129 12:39:16.092749 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-pdxw5"] Jan 29 12:39:17 crc kubenswrapper[4852]: I0129 12:39:17.486518 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d743e65-13db-4231-8871-a60e305a8974" path="/var/lib/kubelet/pods/8d743e65-13db-4231-8871-a60e305a8974/volumes" Jan 29 12:39:17 crc kubenswrapper[4852]: I0129 12:39:17.488659 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a15935b8-7558-4137-aef2-10a574579a5c" path="/var/lib/kubelet/pods/a15935b8-7558-4137-aef2-10a574579a5c/volumes" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.107429 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z9cbt"] Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.110991 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="extract-utilities" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111102 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="extract-utilities" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111183 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111243 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111297 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111349 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111404 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="extract-content" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111456 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="extract-content" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111513 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="extract-utilities" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111568 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" 
containerName="extract-utilities" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111655 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="extract-content" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111713 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="extract-content" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111777 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="extract-content" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111829 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="extract-content" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.111903 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.111966 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: E0129 12:39:22.112023 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="extract-utilities" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.112075 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="extract-utilities" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.112368 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c8ba28-2ebb-417c-b777-74513e9362a6" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.112437 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="60b7b1fc-ae6e-4b79-8202-661e06189e73" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.112494 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa80c707-37d9-448c-9b5f-b318b0885cf5" containerName="registry-server" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.115099 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.121086 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9cbt"] Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.181787 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-utilities\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.182012 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwxj2\" (UniqueName: \"kubernetes.io/projected/fb0a42c4-636f-4660-bec8-76f0dce49fca-kube-api-access-cwxj2\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.182166 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-catalog-content\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.284271 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-utilities\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.284416 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwxj2\" (UniqueName: \"kubernetes.io/projected/fb0a42c4-636f-4660-bec8-76f0dce49fca-kube-api-access-cwxj2\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.284505 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-catalog-content\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.284911 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-utilities\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.285069 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-catalog-content\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.313824 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cwxj2\" (UniqueName: \"kubernetes.io/projected/fb0a42c4-636f-4660-bec8-76f0dce49fca-kube-api-access-cwxj2\") pod \"certified-operators-z9cbt\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:22 crc kubenswrapper[4852]: I0129 12:39:22.462538 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:23 crc kubenswrapper[4852]: I0129 12:39:23.163104 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9cbt"] Jan 29 12:39:23 crc kubenswrapper[4852]: I0129 12:39:23.707152 4852 generic.go:334] "Generic (PLEG): container finished" podID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerID="3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d" exitCode=0 Jan 29 12:39:23 crc kubenswrapper[4852]: I0129 12:39:23.707262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerDied","Data":"3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d"} Jan 29 12:39:23 crc kubenswrapper[4852]: I0129 12:39:23.707443 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerStarted","Data":"5ce63c6140c6ae0dfeeaff1244aeffe252c8e1616d722774317d4237a2f1e571"} Jan 29 12:39:25 crc kubenswrapper[4852]: I0129 12:39:25.726947 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerStarted","Data":"353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9"} Jan 29 12:39:27 crc kubenswrapper[4852]: I0129 12:39:27.463788 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:39:27 crc kubenswrapper[4852]: E0129 12:39:27.465639 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:39:27 crc kubenswrapper[4852]: I0129 12:39:27.747981 4852 generic.go:334] "Generic (PLEG): container finished" podID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerID="353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9" exitCode=0 Jan 29 12:39:27 crc kubenswrapper[4852]: I0129 12:39:27.748070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerDied","Data":"353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9"} Jan 29 12:39:28 crc kubenswrapper[4852]: I0129 12:39:28.027218 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-pvkg8"] Jan 29 12:39:28 crc kubenswrapper[4852]: I0129 12:39:28.035865 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-pvkg8"] Jan 29 12:39:28 crc kubenswrapper[4852]: I0129 12:39:28.774434 4852 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerStarted","Data":"a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1"} Jan 29 12:39:28 crc kubenswrapper[4852]: I0129 12:39:28.822308 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z9cbt" podStartSLOduration=2.382257013 podStartE2EDuration="6.822287605s" podCreationTimestamp="2026-01-29 12:39:22 +0000 UTC" firstStartedPulling="2026-01-29 12:39:23.708858523 +0000 UTC m=+7060.926189657" lastFinishedPulling="2026-01-29 12:39:28.148889085 +0000 UTC m=+7065.366220249" observedRunningTime="2026-01-29 12:39:28.804557703 +0000 UTC m=+7066.021888857" watchObservedRunningTime="2026-01-29 12:39:28.822287605 +0000 UTC m=+7066.039618739" Jan 29 12:39:29 crc kubenswrapper[4852]: I0129 12:39:29.475786 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fec3d121-a2ad-4784-8722-5a62104ad91f" path="/var/lib/kubelet/pods/fec3d121-a2ad-4784-8722-5a62104ad91f/volumes" Jan 29 12:39:32 crc kubenswrapper[4852]: I0129 12:39:32.463090 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:32 crc kubenswrapper[4852]: I0129 12:39:32.463648 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:32 crc kubenswrapper[4852]: I0129 12:39:32.512636 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:39 crc kubenswrapper[4852]: I0129 12:39:39.463469 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:39:39 crc kubenswrapper[4852]: E0129 12:39:39.464366 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:39:42 crc kubenswrapper[4852]: I0129 12:39:42.516035 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:42 crc kubenswrapper[4852]: I0129 12:39:42.578195 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9cbt"] Jan 29 12:39:42 crc kubenswrapper[4852]: I0129 12:39:42.899462 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z9cbt" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="registry-server" containerID="cri-o://a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1" gracePeriod=2 Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.442689 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.562734 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-utilities\") pod \"fb0a42c4-636f-4660-bec8-76f0dce49fca\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.563771 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-catalog-content\") pod \"fb0a42c4-636f-4660-bec8-76f0dce49fca\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.564035 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwxj2\" (UniqueName: \"kubernetes.io/projected/fb0a42c4-636f-4660-bec8-76f0dce49fca-kube-api-access-cwxj2\") pod \"fb0a42c4-636f-4660-bec8-76f0dce49fca\" (UID: \"fb0a42c4-636f-4660-bec8-76f0dce49fca\") " Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.567334 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-utilities" (OuterVolumeSpecName: "utilities") pod "fb0a42c4-636f-4660-bec8-76f0dce49fca" (UID: "fb0a42c4-636f-4660-bec8-76f0dce49fca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.569272 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb0a42c4-636f-4660-bec8-76f0dce49fca-kube-api-access-cwxj2" (OuterVolumeSpecName: "kube-api-access-cwxj2") pod "fb0a42c4-636f-4660-bec8-76f0dce49fca" (UID: "fb0a42c4-636f-4660-bec8-76f0dce49fca"). InnerVolumeSpecName "kube-api-access-cwxj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.615958 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb0a42c4-636f-4660-bec8-76f0dce49fca" (UID: "fb0a42c4-636f-4660-bec8-76f0dce49fca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.665772 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwxj2\" (UniqueName: \"kubernetes.io/projected/fb0a42c4-636f-4660-bec8-76f0dce49fca-kube-api-access-cwxj2\") on node \"crc\" DevicePath \"\"" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.665811 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.665825 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb0a42c4-636f-4660-bec8-76f0dce49fca-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.912485 4852 generic.go:334] "Generic (PLEG): container finished" podID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerID="a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1" exitCode=0 Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.912541 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerDied","Data":"a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1"} Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.912570 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9cbt" event={"ID":"fb0a42c4-636f-4660-bec8-76f0dce49fca","Type":"ContainerDied","Data":"5ce63c6140c6ae0dfeeaff1244aeffe252c8e1616d722774317d4237a2f1e571"} Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.912597 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z9cbt" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.912625 4852 scope.go:117] "RemoveContainer" containerID="a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.952375 4852 scope.go:117] "RemoveContainer" containerID="353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9" Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.961155 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9cbt"] Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.971162 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z9cbt"] Jan 29 12:39:43 crc kubenswrapper[4852]: I0129 12:39:43.982009 4852 scope.go:117] "RemoveContainer" containerID="3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d" Jan 29 12:39:44 crc kubenswrapper[4852]: I0129 12:39:44.022795 4852 scope.go:117] "RemoveContainer" containerID="a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1" Jan 29 12:39:44 crc kubenswrapper[4852]: E0129 12:39:44.023522 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1\": container with ID starting with a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1 not found: ID does not exist" containerID="a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1" Jan 29 12:39:44 crc kubenswrapper[4852]: I0129 12:39:44.023608 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1"} err="failed to get container status \"a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1\": rpc error: code = NotFound desc = could not find container \"a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1\": container with ID starting with a13b103d2dc19d775f37cbea0e3b184ececa60a7c7a40530a49d842b82f59ff1 not found: ID does not exist" Jan 29 12:39:44 crc kubenswrapper[4852]: I0129 12:39:44.023637 4852 scope.go:117] "RemoveContainer" containerID="353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9" Jan 29 12:39:44 crc kubenswrapper[4852]: E0129 12:39:44.023987 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9\": container with ID starting with 353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9 not found: ID does not exist" containerID="353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9" Jan 29 12:39:44 crc kubenswrapper[4852]: I0129 12:39:44.024022 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9"} err="failed to get container status \"353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9\": rpc error: code = NotFound desc = could not find container \"353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9\": container with ID starting with 353b713fe06e7e9a50297c79aa464d22a5a4b48a7f4fb7f54b06d3a825bfcbf9 not found: ID does not exist" Jan 29 12:39:44 crc kubenswrapper[4852]: I0129 12:39:44.024037 4852 scope.go:117] "RemoveContainer" 
containerID="3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d" Jan 29 12:39:44 crc kubenswrapper[4852]: E0129 12:39:44.024315 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d\": container with ID starting with 3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d not found: ID does not exist" containerID="3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d" Jan 29 12:39:44 crc kubenswrapper[4852]: I0129 12:39:44.024348 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d"} err="failed to get container status \"3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d\": rpc error: code = NotFound desc = could not find container \"3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d\": container with ID starting with 3c30498068f5372f760dce8266f55f6749e1d335b9446a3efe61f2894935082d not found: ID does not exist" Jan 29 12:39:45 crc kubenswrapper[4852]: I0129 12:39:45.477140 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" path="/var/lib/kubelet/pods/fb0a42c4-636f-4660-bec8-76f0dce49fca/volumes" Jan 29 12:39:53 crc kubenswrapper[4852]: I0129 12:39:53.470274 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:39:53 crc kubenswrapper[4852]: E0129 12:39:53.470982 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:40:04 crc kubenswrapper[4852]: I0129 12:40:04.423020 4852 scope.go:117] "RemoveContainer" containerID="9cdbfe80720acdbbbd0398878f6b133f4d371fefda295500af2f0ee1c2e8840f" Jan 29 12:40:04 crc kubenswrapper[4852]: I0129 12:40:04.453333 4852 scope.go:117] "RemoveContainer" containerID="ed7dd709b05b8904d8d7e42e124ff917b4ae2d1aad24c259ac25264e516460e1" Jan 29 12:40:04 crc kubenswrapper[4852]: I0129 12:40:04.502239 4852 scope.go:117] "RemoveContainer" containerID="4dc555e9bf650bd00339160c0bb4569bd21ee3dcb1a874dfd4236948b1056fbc" Jan 29 12:40:06 crc kubenswrapper[4852]: I0129 12:40:06.464120 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:40:06 crc kubenswrapper[4852]: E0129 12:40:06.465054 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:40:17 crc kubenswrapper[4852]: I0129 12:40:17.463172 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:40:17 crc kubenswrapper[4852]: E0129 12:40:17.464297 4852 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:40:32 crc kubenswrapper[4852]: I0129 12:40:32.463732 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:40:32 crc kubenswrapper[4852]: E0129 12:40:32.464988 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:40:43 crc kubenswrapper[4852]: I0129 12:40:43.463518 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:40:43 crc kubenswrapper[4852]: E0129 12:40:43.464637 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:40:55 crc kubenswrapper[4852]: I0129 12:40:55.465760 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:40:55 crc kubenswrapper[4852]: E0129 12:40:55.466937 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:41:06 crc kubenswrapper[4852]: I0129 12:41:06.464161 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:41:06 crc kubenswrapper[4852]: E0129 12:41:06.465114 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:41:20 crc kubenswrapper[4852]: I0129 12:41:20.463484 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:41:20 crc kubenswrapper[4852]: E0129 12:41:20.464432 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:41:33 crc kubenswrapper[4852]: I0129 12:41:33.473202 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:41:33 crc kubenswrapper[4852]: E0129 12:41:33.474911 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:41:45 crc kubenswrapper[4852]: I0129 12:41:45.463619 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:41:45 crc kubenswrapper[4852]: E0129 12:41:45.471169 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:41:57 crc kubenswrapper[4852]: I0129 12:41:57.464715 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:41:57 crc kubenswrapper[4852]: E0129 12:41:57.465538 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:42:11 crc kubenswrapper[4852]: I0129 12:42:11.465353 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:42:11 crc kubenswrapper[4852]: E0129 12:42:11.468956 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:42:13 crc kubenswrapper[4852]: I0129 12:42:13.476048 4852 generic.go:334] "Generic (PLEG): container finished" podID="6a211413-55d7-4f37-a2ea-c452ecba4bcc" containerID="9bb8b6bf44a059cbe46af70792e242ee058c584cffff1a235bc7acf692cafd17" exitCode=0 Jan 29 12:42:13 crc kubenswrapper[4852]: I0129 12:42:13.476991 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" event={"ID":"6a211413-55d7-4f37-a2ea-c452ecba4bcc","Type":"ContainerDied","Data":"9bb8b6bf44a059cbe46af70792e242ee058c584cffff1a235bc7acf692cafd17"} Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 
12:42:15.177716 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.219099 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-tripleo-cleanup-combined-ca-bundle\") pod \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.219334 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ceph\") pod \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.219464 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mxmb\" (UniqueName: \"kubernetes.io/projected/6a211413-55d7-4f37-a2ea-c452ecba4bcc-kube-api-access-7mxmb\") pod \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.219557 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ssh-key-openstack-cell1\") pod \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.219663 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-inventory\") pod \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\" (UID: \"6a211413-55d7-4f37-a2ea-c452ecba4bcc\") " Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.249854 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ceph" (OuterVolumeSpecName: "ceph") pod "6a211413-55d7-4f37-a2ea-c452ecba4bcc" (UID: "6a211413-55d7-4f37-a2ea-c452ecba4bcc"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.291095 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "6a211413-55d7-4f37-a2ea-c452ecba4bcc" (UID: "6a211413-55d7-4f37-a2ea-c452ecba4bcc"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.311890 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a211413-55d7-4f37-a2ea-c452ecba4bcc-kube-api-access-7mxmb" (OuterVolumeSpecName: "kube-api-access-7mxmb") pod "6a211413-55d7-4f37-a2ea-c452ecba4bcc" (UID: "6a211413-55d7-4f37-a2ea-c452ecba4bcc"). InnerVolumeSpecName "kube-api-access-7mxmb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.323107 4852 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.323303 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.323394 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mxmb\" (UniqueName: \"kubernetes.io/projected/6a211413-55d7-4f37-a2ea-c452ecba4bcc-kube-api-access-7mxmb\") on node \"crc\" DevicePath \"\"" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.338913 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "6a211413-55d7-4f37-a2ea-c452ecba4bcc" (UID: "6a211413-55d7-4f37-a2ea-c452ecba4bcc"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.370844 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-inventory" (OuterVolumeSpecName: "inventory") pod "6a211413-55d7-4f37-a2ea-c452ecba4bcc" (UID: "6a211413-55d7-4f37-a2ea-c452ecba4bcc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.425661 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.425863 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a211413-55d7-4f37-a2ea-c452ecba4bcc-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.496623 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" event={"ID":"6a211413-55d7-4f37-a2ea-c452ecba4bcc","Type":"ContainerDied","Data":"5dbacd627e5a831202fbefb51661c504dcca2b758436cd63420f1e0aa9e5a27e"} Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.497233 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5dbacd627e5a831202fbefb51661c504dcca2b758436cd63420f1e0aa9e5a27e" Jan 29 12:42:15 crc kubenswrapper[4852]: I0129 12:42:15.497318 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.798700 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-qt2w4"] Jan 29 12:42:22 crc kubenswrapper[4852]: E0129 12:42:22.800622 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="extract-content" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.800656 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="extract-content" Jan 29 12:42:22 crc kubenswrapper[4852]: E0129 12:42:22.800678 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="extract-utilities" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.800690 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="extract-utilities" Jan 29 12:42:22 crc kubenswrapper[4852]: E0129 12:42:22.800715 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a211413-55d7-4f37-a2ea-c452ecba4bcc" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.800728 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a211413-55d7-4f37-a2ea-c452ecba4bcc" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 29 12:42:22 crc kubenswrapper[4852]: E0129 12:42:22.800770 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="registry-server" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.800780 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="registry-server" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.801293 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a211413-55d7-4f37-a2ea-c452ecba4bcc" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.801351 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb0a42c4-636f-4660-bec8-76f0dce49fca" containerName="registry-server" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.802910 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.810711 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.810992 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.811091 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.811272 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.811781 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-qt2w4"] Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.894125 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.894190 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-inventory\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.894334 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.894656 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7sgw\" (UniqueName: \"kubernetes.io/projected/d212a77f-957e-4bc0-ae22-614da3a67d21-kube-api-access-j7sgw\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.894734 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ceph\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.998092 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7sgw\" (UniqueName: \"kubernetes.io/projected/d212a77f-957e-4bc0-ae22-614da3a67d21-kube-api-access-j7sgw\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " 
pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.998228 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ceph\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.998277 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.998330 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-inventory\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:22 crc kubenswrapper[4852]: I0129 12:42:22.998433 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.005160 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ceph\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.005183 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-inventory\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.005601 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.006478 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.015505 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7sgw\" (UniqueName: 
\"kubernetes.io/projected/d212a77f-957e-4bc0-ae22-614da3a67d21-kube-api-access-j7sgw\") pod \"bootstrap-openstack-openstack-cell1-qt2w4\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.139126 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.703004 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-qt2w4"] Jan 29 12:42:23 crc kubenswrapper[4852]: I0129 12:42:23.705777 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:42:24 crc kubenswrapper[4852]: I0129 12:42:24.591803 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" event={"ID":"d212a77f-957e-4bc0-ae22-614da3a67d21","Type":"ContainerStarted","Data":"1d7b147287982b0d02f853a1c4f9c23244ee0e5c4a4b3b71ce3fbf2ee11fa95b"} Jan 29 12:42:24 crc kubenswrapper[4852]: I0129 12:42:24.592319 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" event={"ID":"d212a77f-957e-4bc0-ae22-614da3a67d21","Type":"ContainerStarted","Data":"4fe869d133c08f0706d8f2b31f8eef120b702a09feaa85bb36eedbf7a09c0bfc"} Jan 29 12:42:24 crc kubenswrapper[4852]: I0129 12:42:24.619375 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" podStartSLOduration=2.084519142 podStartE2EDuration="2.619350957s" podCreationTimestamp="2026-01-29 12:42:22 +0000 UTC" firstStartedPulling="2026-01-29 12:42:23.70548177 +0000 UTC m=+7240.922812904" lastFinishedPulling="2026-01-29 12:42:24.240313585 +0000 UTC m=+7241.457644719" observedRunningTime="2026-01-29 12:42:24.611364552 +0000 UTC m=+7241.828695716" watchObservedRunningTime="2026-01-29 12:42:24.619350957 +0000 UTC m=+7241.836682091" Jan 29 12:42:26 crc kubenswrapper[4852]: I0129 12:42:26.463797 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:42:26 crc kubenswrapper[4852]: E0129 12:42:26.464285 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:42:40 crc kubenswrapper[4852]: I0129 12:42:40.463684 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:42:40 crc kubenswrapper[4852]: E0129 12:42:40.464409 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:42:51 crc kubenswrapper[4852]: I0129 12:42:51.463316 4852 scope.go:117] "RemoveContainer" 
containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:42:51 crc kubenswrapper[4852]: E0129 12:42:51.464223 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:43:06 crc kubenswrapper[4852]: I0129 12:43:06.462930 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:43:06 crc kubenswrapper[4852]: E0129 12:43:06.463520 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:43:21 crc kubenswrapper[4852]: I0129 12:43:21.464027 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:43:21 crc kubenswrapper[4852]: E0129 12:43:21.464799 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:43:34 crc kubenswrapper[4852]: I0129 12:43:34.464622 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:43:34 crc kubenswrapper[4852]: E0129 12:43:34.465483 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:43:47 crc kubenswrapper[4852]: I0129 12:43:47.463905 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:43:47 crc kubenswrapper[4852]: E0129 12:43:47.464786 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:44:02 crc kubenswrapper[4852]: I0129 12:44:02.463746 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:44:03 crc kubenswrapper[4852]: I0129 12:44:03.598505 4852 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"19fb261519b90fb978d167b76c52580c164aaf115fa1a929bdbf476e65b0a58f"} Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.160880 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n"] Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.164829 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.168205 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.168515 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.169742 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n"] Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.218555 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhgk6\" (UniqueName: \"kubernetes.io/projected/5a5e857d-921e-47da-8038-1b71b80384d5-kube-api-access-mhgk6\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.218666 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a5e857d-921e-47da-8038-1b71b80384d5-secret-volume\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.218797 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a5e857d-921e-47da-8038-1b71b80384d5-config-volume\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.320316 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhgk6\" (UniqueName: \"kubernetes.io/projected/5a5e857d-921e-47da-8038-1b71b80384d5-kube-api-access-mhgk6\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.320376 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a5e857d-921e-47da-8038-1b71b80384d5-secret-volume\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.320476 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a5e857d-921e-47da-8038-1b71b80384d5-config-volume\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.321261 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a5e857d-921e-47da-8038-1b71b80384d5-config-volume\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.330384 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a5e857d-921e-47da-8038-1b71b80384d5-secret-volume\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.339835 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhgk6\" (UniqueName: \"kubernetes.io/projected/5a5e857d-921e-47da-8038-1b71b80384d5-kube-api-access-mhgk6\") pod \"collect-profiles-29494845-8md5n\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:00 crc kubenswrapper[4852]: I0129 12:45:00.549275 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:01 crc kubenswrapper[4852]: I0129 12:45:01.030090 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n"] Jan 29 12:45:01 crc kubenswrapper[4852]: I0129 12:45:01.403109 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" event={"ID":"5a5e857d-921e-47da-8038-1b71b80384d5","Type":"ContainerStarted","Data":"4ae149e499e8e0c822a974b46632a0f2dbfffc75478f7b89ca810806259f9d1d"} Jan 29 12:45:01 crc kubenswrapper[4852]: I0129 12:45:01.403452 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" event={"ID":"5a5e857d-921e-47da-8038-1b71b80384d5","Type":"ContainerStarted","Data":"73103316e0412b44ece10ae14f535f897bd6276ddda0215b8da2e5ab95f67fbe"} Jan 29 12:45:01 crc kubenswrapper[4852]: I0129 12:45:01.423804 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" podStartSLOduration=1.423783733 podStartE2EDuration="1.423783733s" podCreationTimestamp="2026-01-29 12:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:45:01.4162488 +0000 UTC m=+7398.633579944" watchObservedRunningTime="2026-01-29 12:45:01.423783733 +0000 UTC m=+7398.641114877" Jan 29 12:45:02 crc kubenswrapper[4852]: I0129 12:45:02.415026 4852 generic.go:334] "Generic (PLEG): container finished" podID="5a5e857d-921e-47da-8038-1b71b80384d5" containerID="4ae149e499e8e0c822a974b46632a0f2dbfffc75478f7b89ca810806259f9d1d" exitCode=0 Jan 29 12:45:02 crc kubenswrapper[4852]: I0129 12:45:02.415107 4852 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" event={"ID":"5a5e857d-921e-47da-8038-1b71b80384d5","Type":"ContainerDied","Data":"4ae149e499e8e0c822a974b46632a0f2dbfffc75478f7b89ca810806259f9d1d"} Jan 29 12:45:03 crc kubenswrapper[4852]: I0129 12:45:03.889920 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.003159 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhgk6\" (UniqueName: \"kubernetes.io/projected/5a5e857d-921e-47da-8038-1b71b80384d5-kube-api-access-mhgk6\") pod \"5a5e857d-921e-47da-8038-1b71b80384d5\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.003242 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a5e857d-921e-47da-8038-1b71b80384d5-secret-volume\") pod \"5a5e857d-921e-47da-8038-1b71b80384d5\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.003288 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a5e857d-921e-47da-8038-1b71b80384d5-config-volume\") pod \"5a5e857d-921e-47da-8038-1b71b80384d5\" (UID: \"5a5e857d-921e-47da-8038-1b71b80384d5\") " Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.004392 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a5e857d-921e-47da-8038-1b71b80384d5-config-volume" (OuterVolumeSpecName: "config-volume") pod "5a5e857d-921e-47da-8038-1b71b80384d5" (UID: "5a5e857d-921e-47da-8038-1b71b80384d5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.023966 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a5e857d-921e-47da-8038-1b71b80384d5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5a5e857d-921e-47da-8038-1b71b80384d5" (UID: "5a5e857d-921e-47da-8038-1b71b80384d5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.024835 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a5e857d-921e-47da-8038-1b71b80384d5-kube-api-access-mhgk6" (OuterVolumeSpecName: "kube-api-access-mhgk6") pod "5a5e857d-921e-47da-8038-1b71b80384d5" (UID: "5a5e857d-921e-47da-8038-1b71b80384d5"). InnerVolumeSpecName "kube-api-access-mhgk6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.106252 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhgk6\" (UniqueName: \"kubernetes.io/projected/5a5e857d-921e-47da-8038-1b71b80384d5-kube-api-access-mhgk6\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.106288 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a5e857d-921e-47da-8038-1b71b80384d5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.106299 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a5e857d-921e-47da-8038-1b71b80384d5-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.447742 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" event={"ID":"5a5e857d-921e-47da-8038-1b71b80384d5","Type":"ContainerDied","Data":"73103316e0412b44ece10ae14f535f897bd6276ddda0215b8da2e5ab95f67fbe"} Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.448013 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73103316e0412b44ece10ae14f535f897bd6276ddda0215b8da2e5ab95f67fbe" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.447885 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n" Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.511737 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj"] Jan 29 12:45:04 crc kubenswrapper[4852]: I0129 12:45:04.521106 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-s2wbj"] Jan 29 12:45:05 crc kubenswrapper[4852]: I0129 12:45:05.486243 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79f45844-2d28-463e-b303-22d2275003cd" path="/var/lib/kubelet/pods/79f45844-2d28-463e-b303-22d2275003cd/volumes" Jan 29 12:45:41 crc kubenswrapper[4852]: I0129 12:45:41.872117 4852 generic.go:334] "Generic (PLEG): container finished" podID="d212a77f-957e-4bc0-ae22-614da3a67d21" containerID="1d7b147287982b0d02f853a1c4f9c23244ee0e5c4a4b3b71ce3fbf2ee11fa95b" exitCode=0 Jan 29 12:45:41 crc kubenswrapper[4852]: I0129 12:45:41.872230 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" event={"ID":"d212a77f-957e-4bc0-ae22-614da3a67d21","Type":"ContainerDied","Data":"1d7b147287982b0d02f853a1c4f9c23244ee0e5c4a4b3b71ce3fbf2ee11fa95b"} Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.371699 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.478141 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ceph\") pod \"d212a77f-957e-4bc0-ae22-614da3a67d21\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.478217 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ssh-key-openstack-cell1\") pod \"d212a77f-957e-4bc0-ae22-614da3a67d21\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.478836 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-inventory\") pod \"d212a77f-957e-4bc0-ae22-614da3a67d21\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.478898 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-bootstrap-combined-ca-bundle\") pod \"d212a77f-957e-4bc0-ae22-614da3a67d21\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.478968 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7sgw\" (UniqueName: \"kubernetes.io/projected/d212a77f-957e-4bc0-ae22-614da3a67d21-kube-api-access-j7sgw\") pod \"d212a77f-957e-4bc0-ae22-614da3a67d21\" (UID: \"d212a77f-957e-4bc0-ae22-614da3a67d21\") " Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.484513 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ceph" (OuterVolumeSpecName: "ceph") pod "d212a77f-957e-4bc0-ae22-614da3a67d21" (UID: "d212a77f-957e-4bc0-ae22-614da3a67d21"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.485441 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d212a77f-957e-4bc0-ae22-614da3a67d21-kube-api-access-j7sgw" (OuterVolumeSpecName: "kube-api-access-j7sgw") pod "d212a77f-957e-4bc0-ae22-614da3a67d21" (UID: "d212a77f-957e-4bc0-ae22-614da3a67d21"). InnerVolumeSpecName "kube-api-access-j7sgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.487827 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d212a77f-957e-4bc0-ae22-614da3a67d21" (UID: "d212a77f-957e-4bc0-ae22-614da3a67d21"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.515300 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "d212a77f-957e-4bc0-ae22-614da3a67d21" (UID: "d212a77f-957e-4bc0-ae22-614da3a67d21"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.520196 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-inventory" (OuterVolumeSpecName: "inventory") pod "d212a77f-957e-4bc0-ae22-614da3a67d21" (UID: "d212a77f-957e-4bc0-ae22-614da3a67d21"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.582331 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.582375 4852 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.582390 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7sgw\" (UniqueName: \"kubernetes.io/projected/d212a77f-957e-4bc0-ae22-614da3a67d21-kube-api-access-j7sgw\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.582405 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.582418 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d212a77f-957e-4bc0-ae22-614da3a67d21-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.894670 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" event={"ID":"d212a77f-957e-4bc0-ae22-614da3a67d21","Type":"ContainerDied","Data":"4fe869d133c08f0706d8f2b31f8eef120b702a09feaa85bb36eedbf7a09c0bfc"} Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.895014 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fe869d133c08f0706d8f2b31f8eef120b702a09feaa85bb36eedbf7a09c0bfc" Jan 29 12:45:43 crc kubenswrapper[4852]: I0129 12:45:43.894719 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-qt2w4" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.009526 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-hz8sq"] Jan 29 12:45:44 crc kubenswrapper[4852]: E0129 12:45:44.010006 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5e857d-921e-47da-8038-1b71b80384d5" containerName="collect-profiles" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.010022 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5e857d-921e-47da-8038-1b71b80384d5" containerName="collect-profiles" Jan 29 12:45:44 crc kubenswrapper[4852]: E0129 12:45:44.010046 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d212a77f-957e-4bc0-ae22-614da3a67d21" containerName="bootstrap-openstack-openstack-cell1" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.010052 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d212a77f-957e-4bc0-ae22-614da3a67d21" containerName="bootstrap-openstack-openstack-cell1" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.010221 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a5e857d-921e-47da-8038-1b71b80384d5" containerName="collect-profiles" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.010245 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d212a77f-957e-4bc0-ae22-614da3a67d21" containerName="bootstrap-openstack-openstack-cell1" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.011000 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.016390 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.016501 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.016693 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.018103 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.020939 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-hz8sq"] Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.195495 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-inventory\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.195608 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 
12:45:44.195699 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp6tn\" (UniqueName: \"kubernetes.io/projected/767e03a5-76cc-49ce-a6b5-0c18616f0405-kube-api-access-zp6tn\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.195736 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ceph\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.297251 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp6tn\" (UniqueName: \"kubernetes.io/projected/767e03a5-76cc-49ce-a6b5-0c18616f0405-kube-api-access-zp6tn\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.297370 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ceph\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.297469 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-inventory\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.297515 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.311094 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ceph\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.311203 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.313008 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-inventory\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.313846 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp6tn\" (UniqueName: \"kubernetes.io/projected/767e03a5-76cc-49ce-a6b5-0c18616f0405-kube-api-access-zp6tn\") pod \"download-cache-openstack-openstack-cell1-hz8sq\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.332268 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:45:44 crc kubenswrapper[4852]: I0129 12:45:44.903890 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-hz8sq"] Jan 29 12:45:45 crc kubenswrapper[4852]: I0129 12:45:45.917208 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" event={"ID":"767e03a5-76cc-49ce-a6b5-0c18616f0405","Type":"ContainerStarted","Data":"cbbdd936b33c672c9378aafdc01799423b33be118ccb160a0585e246475ed17c"} Jan 29 12:45:45 crc kubenswrapper[4852]: I0129 12:45:45.917789 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" event={"ID":"767e03a5-76cc-49ce-a6b5-0c18616f0405","Type":"ContainerStarted","Data":"8b62364ffff00a605533eba0c9c1b29a1f278bb71d4fae5d2c0c32d4ba7f67e4"} Jan 29 12:45:45 crc kubenswrapper[4852]: I0129 12:45:45.940656 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" podStartSLOduration=2.40611905 podStartE2EDuration="2.940629088s" podCreationTimestamp="2026-01-29 12:45:43 +0000 UTC" firstStartedPulling="2026-01-29 12:45:44.907676271 +0000 UTC m=+7442.125007405" lastFinishedPulling="2026-01-29 12:45:45.442186309 +0000 UTC m=+7442.659517443" observedRunningTime="2026-01-29 12:45:45.933934095 +0000 UTC m=+7443.151265249" watchObservedRunningTime="2026-01-29 12:45:45.940629088 +0000 UTC m=+7443.157960252" Jan 29 12:46:04 crc kubenswrapper[4852]: I0129 12:46:04.725952 4852 scope.go:117] "RemoveContainer" containerID="7ea58d16a7baaf3e8d62a16c2880605cd00a97ad317fc41715decd16a7886f1e" Jan 29 12:46:23 crc kubenswrapper[4852]: I0129 12:46:23.927939 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zngzr"] Jan 29 12:46:23 crc kubenswrapper[4852]: I0129 12:46:23.932403 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:23 crc kubenswrapper[4852]: I0129 12:46:23.946204 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zngzr"] Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.065243 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-catalog-content\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.065363 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-utilities\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.065458 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9gwk\" (UniqueName: \"kubernetes.io/projected/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-kube-api-access-w9gwk\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.167671 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-catalog-content\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.167808 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-utilities\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.167911 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9gwk\" (UniqueName: \"kubernetes.io/projected/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-kube-api-access-w9gwk\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.168394 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-catalog-content\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.168526 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-utilities\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.200841 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w9gwk\" (UniqueName: \"kubernetes.io/projected/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-kube-api-access-w9gwk\") pod \"community-operators-zngzr\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.277818 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:24 crc kubenswrapper[4852]: I0129 12:46:24.946187 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zngzr"] Jan 29 12:46:25 crc kubenswrapper[4852]: I0129 12:46:25.334441 4852 generic.go:334] "Generic (PLEG): container finished" podID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerID="d1eee08c30b31cf8efcd04a1f47a9c9b65ddb454d53d315d1f0ac6fcc1622942" exitCode=0 Jan 29 12:46:25 crc kubenswrapper[4852]: I0129 12:46:25.334564 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerDied","Data":"d1eee08c30b31cf8efcd04a1f47a9c9b65ddb454d53d315d1f0ac6fcc1622942"} Jan 29 12:46:25 crc kubenswrapper[4852]: I0129 12:46:25.334772 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerStarted","Data":"8792694bbe463a97e43b3ca3b02fdeb8daa3c504668bfb416cf2649ef863e03b"} Jan 29 12:46:27 crc kubenswrapper[4852]: I0129 12:46:27.356390 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerStarted","Data":"8c287d6e041d8a71828e9c3392ddf396e54990c0195dfb1cac66b072aac37634"} Jan 29 12:46:29 crc kubenswrapper[4852]: I0129 12:46:29.978759 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j288t"] Jan 29 12:46:29 crc kubenswrapper[4852]: I0129 12:46:29.983935 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:29.999242 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j288t"] Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.022229 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.022284 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.028953 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-857xk\" (UniqueName: \"kubernetes.io/projected/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-kube-api-access-857xk\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.029003 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-utilities\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.029038 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-catalog-content\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.130525 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-857xk\" (UniqueName: \"kubernetes.io/projected/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-kube-api-access-857xk\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.130570 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-utilities\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.130614 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-catalog-content\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.131226 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-catalog-content\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.131566 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-utilities\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.153221 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-857xk\" (UniqueName: \"kubernetes.io/projected/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-kube-api-access-857xk\") pod \"redhat-marketplace-j288t\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.367559 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.383002 4852 generic.go:334] "Generic (PLEG): container finished" podID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerID="8c287d6e041d8a71828e9c3392ddf396e54990c0195dfb1cac66b072aac37634" exitCode=0 Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.383045 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerDied","Data":"8c287d6e041d8a71828e9c3392ddf396e54990c0195dfb1cac66b072aac37634"} Jan 29 12:46:30 crc kubenswrapper[4852]: I0129 12:46:30.932880 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j288t"] Jan 29 12:46:30 crc kubenswrapper[4852]: W0129 12:46:30.938114 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483 WatchSource:0}: Error finding container 2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483: Status 404 returned error can't find the container with id 2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483 Jan 29 12:46:31 crc kubenswrapper[4852]: I0129 12:46:31.394898 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerStarted","Data":"aa7df4943d91236922acc454d5f7a01f07a9e968e8d96a9e0a66840141e55b10"} Jan 29 12:46:31 crc kubenswrapper[4852]: I0129 12:46:31.399046 4852 generic.go:334] "Generic (PLEG): container finished" podID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerID="ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4" exitCode=0 Jan 29 12:46:31 crc kubenswrapper[4852]: I0129 12:46:31.399158 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerDied","Data":"ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4"} Jan 29 12:46:31 crc kubenswrapper[4852]: I0129 12:46:31.399433 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerStarted","Data":"2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483"} Jan 29 12:46:31 crc kubenswrapper[4852]: I0129 12:46:31.424130 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zngzr" podStartSLOduration=2.9068560469999998 podStartE2EDuration="8.424106863s" podCreationTimestamp="2026-01-29 12:46:23 +0000 UTC" firstStartedPulling="2026-01-29 12:46:25.33705152 +0000 UTC m=+7482.554382674" lastFinishedPulling="2026-01-29 12:46:30.854302356 +0000 UTC m=+7488.071633490" observedRunningTime="2026-01-29 12:46:31.417991164 +0000 UTC m=+7488.635322288" watchObservedRunningTime="2026-01-29 12:46:31.424106863 +0000 UTC m=+7488.641437997" Jan 29 12:46:32 crc kubenswrapper[4852]: I0129 12:46:32.414064 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerStarted","Data":"8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee"} Jan 29 12:46:33 crc kubenswrapper[4852]: I0129 12:46:33.425246 4852 generic.go:334] "Generic (PLEG): container finished" podID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerID="8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee" exitCode=0 Jan 29 12:46:33 crc kubenswrapper[4852]: I0129 12:46:33.425493 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerDied","Data":"8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee"} Jan 29 12:46:34 crc kubenswrapper[4852]: I0129 12:46:34.278152 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:34 crc kubenswrapper[4852]: I0129 12:46:34.278463 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:34 crc kubenswrapper[4852]: I0129 12:46:34.337448 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:34 crc kubenswrapper[4852]: I0129 12:46:34.445221 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerStarted","Data":"f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071"} Jan 29 12:46:34 crc kubenswrapper[4852]: I0129 12:46:34.473268 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j288t" podStartSLOduration=2.978600657 podStartE2EDuration="5.473239832s" podCreationTimestamp="2026-01-29 12:46:29 +0000 UTC" firstStartedPulling="2026-01-29 12:46:31.401727538 +0000 UTC m=+7488.619058672" lastFinishedPulling="2026-01-29 12:46:33.896366713 +0000 UTC m=+7491.113697847" observedRunningTime="2026-01-29 12:46:34.462241854 +0000 UTC m=+7491.679572988" watchObservedRunningTime="2026-01-29 12:46:34.473239832 +0000 UTC m=+7491.690570966" Jan 29 12:46:40 crc kubenswrapper[4852]: I0129 12:46:40.368287 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:40 crc kubenswrapper[4852]: I0129 12:46:40.368920 4852 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:40 crc kubenswrapper[4852]: I0129 12:46:40.425739 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:40 crc kubenswrapper[4852]: I0129 12:46:40.558533 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:40 crc kubenswrapper[4852]: I0129 12:46:40.660398 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j288t"] Jan 29 12:46:42 crc kubenswrapper[4852]: I0129 12:46:42.526044 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j288t" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="registry-server" containerID="cri-o://f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071" gracePeriod=2 Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.012689 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.131087 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-utilities\") pod \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.131248 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-catalog-content\") pod \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.131454 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-857xk\" (UniqueName: \"kubernetes.io/projected/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-kube-api-access-857xk\") pod \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\" (UID: \"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc\") " Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.132262 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-utilities" (OuterVolumeSpecName: "utilities") pod "abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" (UID: "abe24ba2-2e8c-4d52-87ed-49c6cb6daabc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.134042 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.138559 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-kube-api-access-857xk" (OuterVolumeSpecName: "kube-api-access-857xk") pod "abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" (UID: "abe24ba2-2e8c-4d52-87ed-49c6cb6daabc"). InnerVolumeSpecName "kube-api-access-857xk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.157443 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" (UID: "abe24ba2-2e8c-4d52-87ed-49c6cb6daabc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.236201 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.236236 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-857xk\" (UniqueName: \"kubernetes.io/projected/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc-kube-api-access-857xk\") on node \"crc\" DevicePath \"\"" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.538694 4852 generic.go:334] "Generic (PLEG): container finished" podID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerID="f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071" exitCode=0 Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.538772 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerDied","Data":"f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071"} Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.538837 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j288t" event={"ID":"abe24ba2-2e8c-4d52-87ed-49c6cb6daabc","Type":"ContainerDied","Data":"2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483"} Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.538839 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j288t" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.538863 4852 scope.go:117] "RemoveContainer" containerID="f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.570056 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j288t"] Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.573710 4852 scope.go:117] "RemoveContainer" containerID="8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.579642 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j288t"] Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.591932 4852 scope.go:117] "RemoveContainer" containerID="ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.635605 4852 scope.go:117] "RemoveContainer" containerID="f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071" Jan 29 12:46:43 crc kubenswrapper[4852]: E0129 12:46:43.636168 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071\": container with ID starting with f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071 not found: ID does not exist" containerID="f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.636232 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071"} err="failed to get container status \"f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071\": rpc error: code = NotFound desc = could not find container \"f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071\": container with ID starting with f0a759663cef516b9fec83ed3490e55065ac271b24e547934fa96e9ea1505071 not found: ID does not exist" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.636271 4852 scope.go:117] "RemoveContainer" containerID="8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee" Jan 29 12:46:43 crc kubenswrapper[4852]: E0129 12:46:43.636663 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee\": container with ID starting with 8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee not found: ID does not exist" containerID="8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.636699 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee"} err="failed to get container status \"8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee\": rpc error: code = NotFound desc = could not find container \"8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee\": container with ID starting with 8c6e8016fb40c301dcc1bdf31367b73fe7ad4b2d5e28a3b093eca92cbf30ecee not found: ID does not exist" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.636727 4852 scope.go:117] "RemoveContainer" 
containerID="ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4" Jan 29 12:46:43 crc kubenswrapper[4852]: E0129 12:46:43.637090 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4\": container with ID starting with ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4 not found: ID does not exist" containerID="ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4" Jan 29 12:46:43 crc kubenswrapper[4852]: I0129 12:46:43.637141 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4"} err="failed to get container status \"ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4\": rpc error: code = NotFound desc = could not find container \"ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4\": container with ID starting with ccdc3caa124595013eb01f62f0be75128b5e5da92e34dd97ff3d20418a3bdff4 not found: ID does not exist" Jan 29 12:46:44 crc kubenswrapper[4852]: I0129 12:46:44.341118 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.065148 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zngzr"] Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.066425 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zngzr" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="registry-server" containerID="cri-o://aa7df4943d91236922acc454d5f7a01f07a9e968e8d96a9e0a66840141e55b10" gracePeriod=2 Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.481111 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" path="/var/lib/kubelet/pods/abe24ba2-2e8c-4d52-87ed-49c6cb6daabc/volumes" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.563761 4852 generic.go:334] "Generic (PLEG): container finished" podID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerID="aa7df4943d91236922acc454d5f7a01f07a9e968e8d96a9e0a66840141e55b10" exitCode=0 Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.563852 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerDied","Data":"aa7df4943d91236922acc454d5f7a01f07a9e968e8d96a9e0a66840141e55b10"} Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.564129 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zngzr" event={"ID":"0acfa1c8-6769-4383-8e4e-44abfb9c17b2","Type":"ContainerDied","Data":"8792694bbe463a97e43b3ca3b02fdeb8daa3c504668bfb416cf2649ef863e03b"} Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.564154 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8792694bbe463a97e43b3ca3b02fdeb8daa3c504668bfb416cf2649ef863e03b" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.570006 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.693086 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-catalog-content\") pod \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.693212 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-utilities\") pod \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.693271 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9gwk\" (UniqueName: \"kubernetes.io/projected/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-kube-api-access-w9gwk\") pod \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\" (UID: \"0acfa1c8-6769-4383-8e4e-44abfb9c17b2\") " Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.693976 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-utilities" (OuterVolumeSpecName: "utilities") pod "0acfa1c8-6769-4383-8e4e-44abfb9c17b2" (UID: "0acfa1c8-6769-4383-8e4e-44abfb9c17b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.698849 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-kube-api-access-w9gwk" (OuterVolumeSpecName: "kube-api-access-w9gwk") pod "0acfa1c8-6769-4383-8e4e-44abfb9c17b2" (UID: "0acfa1c8-6769-4383-8e4e-44abfb9c17b2"). InnerVolumeSpecName "kube-api-access-w9gwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.745790 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0acfa1c8-6769-4383-8e4e-44abfb9c17b2" (UID: "0acfa1c8-6769-4383-8e4e-44abfb9c17b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.796005 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.796056 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:46:45 crc kubenswrapper[4852]: I0129 12:46:45.796071 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9gwk\" (UniqueName: \"kubernetes.io/projected/0acfa1c8-6769-4383-8e4e-44abfb9c17b2-kube-api-access-w9gwk\") on node \"crc\" DevicePath \"\"" Jan 29 12:46:46 crc kubenswrapper[4852]: I0129 12:46:46.573922 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zngzr" Jan 29 12:46:46 crc kubenswrapper[4852]: I0129 12:46:46.627224 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zngzr"] Jan 29 12:46:46 crc kubenswrapper[4852]: I0129 12:46:46.637279 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zngzr"] Jan 29 12:46:47 crc kubenswrapper[4852]: E0129 12:46:47.072290 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483\": RecentStats: unable to find data in memory cache]" Jan 29 12:46:47 crc kubenswrapper[4852]: I0129 12:46:47.477851 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" path="/var/lib/kubelet/pods/0acfa1c8-6769-4383-8e4e-44abfb9c17b2/volumes" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.589758 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hfkll"] Jan 29 12:46:49 crc kubenswrapper[4852]: E0129 12:46:49.591036 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="extract-utilities" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591064 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="extract-utilities" Jan 29 12:46:49 crc kubenswrapper[4852]: E0129 12:46:49.591095 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="extract-content" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591108 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="extract-content" Jan 29 12:46:49 crc kubenswrapper[4852]: E0129 12:46:49.591145 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="extract-utilities" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591158 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="extract-utilities" Jan 29 12:46:49 crc kubenswrapper[4852]: E0129 12:46:49.591182 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="registry-server" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591192 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="registry-server" Jan 29 12:46:49 crc kubenswrapper[4852]: E0129 12:46:49.591214 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="extract-content" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591224 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="extract-content" Jan 29 12:46:49 crc kubenswrapper[4852]: E0129 12:46:49.591263 4852 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="registry-server" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591275 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="registry-server" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591766 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="abe24ba2-2e8c-4d52-87ed-49c6cb6daabc" containerName="registry-server" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.591799 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acfa1c8-6769-4383-8e4e-44abfb9c17b2" containerName="registry-server" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.593873 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.616030 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hfkll"] Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.689041 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-catalog-content\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.689090 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-utilities\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.689128 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzqhk\" (UniqueName: \"kubernetes.io/projected/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-kube-api-access-jzqhk\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.790718 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-catalog-content\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.790776 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-utilities\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.790817 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzqhk\" (UniqueName: \"kubernetes.io/projected/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-kube-api-access-jzqhk\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.791505 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-catalog-content\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.791750 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-utilities\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.813386 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzqhk\" (UniqueName: \"kubernetes.io/projected/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-kube-api-access-jzqhk\") pod \"redhat-operators-hfkll\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:49 crc kubenswrapper[4852]: I0129 12:46:49.926274 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:46:50 crc kubenswrapper[4852]: I0129 12:46:50.478667 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hfkll"] Jan 29 12:46:50 crc kubenswrapper[4852]: I0129 12:46:50.616110 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerStarted","Data":"fad008cceae5dfa6214bc7234f8760df7a64628b1926370665a3cedb43b23982"} Jan 29 12:46:51 crc kubenswrapper[4852]: I0129 12:46:51.628768 4852 generic.go:334] "Generic (PLEG): container finished" podID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerID="5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e" exitCode=0 Jan 29 12:46:51 crc kubenswrapper[4852]: I0129 12:46:51.628832 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerDied","Data":"5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e"} Jan 29 12:46:52 crc kubenswrapper[4852]: I0129 12:46:52.645684 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerStarted","Data":"2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb"} Jan 29 12:46:57 crc kubenswrapper[4852]: E0129 12:46:57.357410 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483\": RecentStats: unable to find data in memory cache]" Jan 29 12:47:00 crc kubenswrapper[4852]: I0129 12:47:00.018058 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:47:00 crc 
kubenswrapper[4852]: I0129 12:47:00.018739 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:47:00 crc kubenswrapper[4852]: I0129 12:47:00.728522 4852 generic.go:334] "Generic (PLEG): container finished" podID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerID="2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb" exitCode=0 Jan 29 12:47:00 crc kubenswrapper[4852]: I0129 12:47:00.728567 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerDied","Data":"2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb"} Jan 29 12:47:02 crc kubenswrapper[4852]: I0129 12:47:02.747944 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerStarted","Data":"a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087"} Jan 29 12:47:02 crc kubenswrapper[4852]: I0129 12:47:02.773322 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hfkll" podStartSLOduration=3.938372771 podStartE2EDuration="13.773303711s" podCreationTimestamp="2026-01-29 12:46:49 +0000 UTC" firstStartedPulling="2026-01-29 12:46:51.631545605 +0000 UTC m=+7508.848876739" lastFinishedPulling="2026-01-29 12:47:01.466476545 +0000 UTC m=+7518.683807679" observedRunningTime="2026-01-29 12:47:02.770172885 +0000 UTC m=+7519.987504039" watchObservedRunningTime="2026-01-29 12:47:02.773303711 +0000 UTC m=+7519.990634835" Jan 29 12:47:07 crc kubenswrapper[4852]: E0129 12:47:07.661705 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483\": RecentStats: unable to find data in memory cache]" Jan 29 12:47:09 crc kubenswrapper[4852]: I0129 12:47:09.927131 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:47:09 crc kubenswrapper[4852]: I0129 12:47:09.928625 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:47:10 crc kubenswrapper[4852]: I0129 12:47:10.993953 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hfkll" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="registry-server" probeResult="failure" output=< Jan 29 12:47:10 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:47:10 crc kubenswrapper[4852]: > Jan 29 12:47:17 crc kubenswrapper[4852]: E0129 12:47:17.921406 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice\": 
RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483\": RecentStats: unable to find data in memory cache]" Jan 29 12:47:18 crc kubenswrapper[4852]: I0129 12:47:18.949481 4852 generic.go:334] "Generic (PLEG): container finished" podID="767e03a5-76cc-49ce-a6b5-0c18616f0405" containerID="cbbdd936b33c672c9378aafdc01799423b33be118ccb160a0585e246475ed17c" exitCode=0 Jan 29 12:47:18 crc kubenswrapper[4852]: I0129 12:47:18.949635 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" event={"ID":"767e03a5-76cc-49ce-a6b5-0c18616f0405","Type":"ContainerDied","Data":"cbbdd936b33c672c9378aafdc01799423b33be118ccb160a0585e246475ed17c"} Jan 29 12:47:19 crc kubenswrapper[4852]: I0129 12:47:19.979375 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.038066 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.489136 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.640724 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ssh-key-openstack-cell1\") pod \"767e03a5-76cc-49ce-a6b5-0c18616f0405\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.640862 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ceph\") pod \"767e03a5-76cc-49ce-a6b5-0c18616f0405\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.641028 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zp6tn\" (UniqueName: \"kubernetes.io/projected/767e03a5-76cc-49ce-a6b5-0c18616f0405-kube-api-access-zp6tn\") pod \"767e03a5-76cc-49ce-a6b5-0c18616f0405\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.641150 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-inventory\") pod \"767e03a5-76cc-49ce-a6b5-0c18616f0405\" (UID: \"767e03a5-76cc-49ce-a6b5-0c18616f0405\") " Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.646233 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/767e03a5-76cc-49ce-a6b5-0c18616f0405-kube-api-access-zp6tn" (OuterVolumeSpecName: "kube-api-access-zp6tn") pod "767e03a5-76cc-49ce-a6b5-0c18616f0405" (UID: "767e03a5-76cc-49ce-a6b5-0c18616f0405"). InnerVolumeSpecName "kube-api-access-zp6tn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.646413 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ceph" (OuterVolumeSpecName: "ceph") pod "767e03a5-76cc-49ce-a6b5-0c18616f0405" (UID: "767e03a5-76cc-49ce-a6b5-0c18616f0405"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.673342 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-inventory" (OuterVolumeSpecName: "inventory") pod "767e03a5-76cc-49ce-a6b5-0c18616f0405" (UID: "767e03a5-76cc-49ce-a6b5-0c18616f0405"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.688394 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "767e03a5-76cc-49ce-a6b5-0c18616f0405" (UID: "767e03a5-76cc-49ce-a6b5-0c18616f0405"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.744476 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zp6tn\" (UniqueName: \"kubernetes.io/projected/767e03a5-76cc-49ce-a6b5-0c18616f0405-kube-api-access-zp6tn\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.744561 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.744600 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.744614 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/767e03a5-76cc-49ce-a6b5-0c18616f0405-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.791296 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hfkll"] Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.973001 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" event={"ID":"767e03a5-76cc-49ce-a6b5-0c18616f0405","Type":"ContainerDied","Data":"8b62364ffff00a605533eba0c9c1b29a1f278bb71d4fae5d2c0c32d4ba7f67e4"} Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.973496 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b62364ffff00a605533eba0c9c1b29a1f278bb71d4fae5d2c0c32d4ba7f67e4" Jan 29 12:47:20 crc kubenswrapper[4852]: I0129 12:47:20.973137 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-hz8sq" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.057383 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-wrb7z"] Jan 29 12:47:21 crc kubenswrapper[4852]: E0129 12:47:21.057884 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="767e03a5-76cc-49ce-a6b5-0c18616f0405" containerName="download-cache-openstack-openstack-cell1" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.057897 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="767e03a5-76cc-49ce-a6b5-0c18616f0405" containerName="download-cache-openstack-openstack-cell1" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.058134 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="767e03a5-76cc-49ce-a6b5-0c18616f0405" containerName="download-cache-openstack-openstack-cell1" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.058959 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.061196 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.061861 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.061869 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.067731 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.068051 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-wrb7z"] Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.255214 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ceph\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.255909 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.256031 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-inventory\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.256135 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfrtc\" 
(UniqueName: \"kubernetes.io/projected/7d79020b-902b-425d-ab7d-d73c666d7582-kube-api-access-sfrtc\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.358419 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ceph\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.358533 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.358659 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-inventory\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.358766 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfrtc\" (UniqueName: \"kubernetes.io/projected/7d79020b-902b-425d-ab7d-d73c666d7582-kube-api-access-sfrtc\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.363139 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-inventory\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.363435 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.363460 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ceph\") pod \"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.391158 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfrtc\" (UniqueName: \"kubernetes.io/projected/7d79020b-902b-425d-ab7d-d73c666d7582-kube-api-access-sfrtc\") pod 
\"configure-network-openstack-openstack-cell1-wrb7z\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.684551 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:47:21 crc kubenswrapper[4852]: I0129 12:47:21.981902 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hfkll" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="registry-server" containerID="cri-o://a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087" gracePeriod=2 Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.304436 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-wrb7z"] Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.537794 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.587422 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzqhk\" (UniqueName: \"kubernetes.io/projected/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-kube-api-access-jzqhk\") pod \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.587519 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-catalog-content\") pod \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.587650 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-utilities\") pod \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\" (UID: \"16e74c6f-c646-475a-8f42-61c0dd6dbc3a\") " Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.588460 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-utilities" (OuterVolumeSpecName: "utilities") pod "16e74c6f-c646-475a-8f42-61c0dd6dbc3a" (UID: "16e74c6f-c646-475a-8f42-61c0dd6dbc3a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.593787 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-kube-api-access-jzqhk" (OuterVolumeSpecName: "kube-api-access-jzqhk") pod "16e74c6f-c646-475a-8f42-61c0dd6dbc3a" (UID: "16e74c6f-c646-475a-8f42-61c0dd6dbc3a"). InnerVolumeSpecName "kube-api-access-jzqhk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.689475 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.689503 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzqhk\" (UniqueName: \"kubernetes.io/projected/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-kube-api-access-jzqhk\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.705451 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16e74c6f-c646-475a-8f42-61c0dd6dbc3a" (UID: "16e74c6f-c646-475a-8f42-61c0dd6dbc3a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.791762 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16e74c6f-c646-475a-8f42-61c0dd6dbc3a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.993687 4852 generic.go:334] "Generic (PLEG): container finished" podID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerID="a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087" exitCode=0 Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.993779 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerDied","Data":"a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087"} Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.993815 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hfkll" event={"ID":"16e74c6f-c646-475a-8f42-61c0dd6dbc3a","Type":"ContainerDied","Data":"fad008cceae5dfa6214bc7234f8760df7a64628b1926370665a3cedb43b23982"} Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.993836 4852 scope.go:117] "RemoveContainer" containerID="a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087" Jan 29 12:47:22 crc kubenswrapper[4852]: I0129 12:47:22.993865 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hfkll" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.004367 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" event={"ID":"7d79020b-902b-425d-ab7d-d73c666d7582","Type":"ContainerStarted","Data":"17939dd03b7d8ce2e47f5353b4d591cdb2778a41d70b696b8acfd43fb9221d69"} Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.068897 4852 scope.go:117] "RemoveContainer" containerID="2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.097234 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hfkll"] Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.106487 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hfkll"] Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.124564 4852 scope.go:117] "RemoveContainer" containerID="5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.152250 4852 scope.go:117] "RemoveContainer" containerID="a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087" Jan 29 12:47:23 crc kubenswrapper[4852]: E0129 12:47:23.152728 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087\": container with ID starting with a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087 not found: ID does not exist" containerID="a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.152791 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087"} err="failed to get container status \"a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087\": rpc error: code = NotFound desc = could not find container \"a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087\": container with ID starting with a2578093c378bed38fe02a03711e2a9f07e3b8ceeeebd6af959fe32ed2b98087 not found: ID does not exist" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.152826 4852 scope.go:117] "RemoveContainer" containerID="2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb" Jan 29 12:47:23 crc kubenswrapper[4852]: E0129 12:47:23.153527 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb\": container with ID starting with 2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb not found: ID does not exist" containerID="2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.153679 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb"} err="failed to get container status \"2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb\": rpc error: code = NotFound desc = could not find container \"2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb\": container with ID starting with 2b0d5222b7c5450552f48421fd2d3a27cc10fe334c8d75c730a1f14114953deb 
not found: ID does not exist" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.153720 4852 scope.go:117] "RemoveContainer" containerID="5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e" Jan 29 12:47:23 crc kubenswrapper[4852]: E0129 12:47:23.154066 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e\": container with ID starting with 5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e not found: ID does not exist" containerID="5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.154102 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e"} err="failed to get container status \"5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e\": rpc error: code = NotFound desc = could not find container \"5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e\": container with ID starting with 5529a52f51f3f082d075f99df26ba8e543d19153b0f08b347111d4a757d9b82e not found: ID does not exist" Jan 29 12:47:23 crc kubenswrapper[4852]: I0129 12:47:23.481794 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" path="/var/lib/kubelet/pods/16e74c6f-c646-475a-8f42-61c0dd6dbc3a/volumes" Jan 29 12:47:24 crc kubenswrapper[4852]: I0129 12:47:24.022635 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" event={"ID":"7d79020b-902b-425d-ab7d-d73c666d7582","Type":"ContainerStarted","Data":"36e4177740ce1f3ab577251144770849b9888682f7a0194f63f6488216388ef1"} Jan 29 12:47:24 crc kubenswrapper[4852]: I0129 12:47:24.049640 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" podStartSLOduration=2.457949014 podStartE2EDuration="3.049622264s" podCreationTimestamp="2026-01-29 12:47:21 +0000 UTC" firstStartedPulling="2026-01-29 12:47:22.344522668 +0000 UTC m=+7539.561853802" lastFinishedPulling="2026-01-29 12:47:22.936195918 +0000 UTC m=+7540.153527052" observedRunningTime="2026-01-29 12:47:24.048051306 +0000 UTC m=+7541.265382440" watchObservedRunningTime="2026-01-29 12:47:24.049622264 +0000 UTC m=+7541.266953398" Jan 29 12:47:28 crc kubenswrapper[4852]: E0129 12:47:28.177463 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483\": RecentStats: unable to find data in memory cache]" Jan 29 12:47:30 crc kubenswrapper[4852]: I0129 12:47:30.017721 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:47:30 crc kubenswrapper[4852]: I0129 12:47:30.018281 4852 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:47:30 crc kubenswrapper[4852]: I0129 12:47:30.018333 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:47:30 crc kubenswrapper[4852]: I0129 12:47:30.019252 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19fb261519b90fb978d167b76c52580c164aaf115fa1a929bdbf476e65b0a58f"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:47:30 crc kubenswrapper[4852]: I0129 12:47:30.019309 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://19fb261519b90fb978d167b76c52580c164aaf115fa1a929bdbf476e65b0a58f" gracePeriod=600 Jan 29 12:47:31 crc kubenswrapper[4852]: I0129 12:47:31.107618 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="19fb261519b90fb978d167b76c52580c164aaf115fa1a929bdbf476e65b0a58f" exitCode=0 Jan 29 12:47:31 crc kubenswrapper[4852]: I0129 12:47:31.107822 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"19fb261519b90fb978d167b76c52580c164aaf115fa1a929bdbf476e65b0a58f"} Jan 29 12:47:31 crc kubenswrapper[4852]: I0129 12:47:31.108108 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35"} Jan 29 12:47:31 crc kubenswrapper[4852]: I0129 12:47:31.108131 4852 scope.go:117] "RemoveContainer" containerID="e2779739d379981a92c0305c82ac93bcf4bb2379e7500a60c8f3875978001f63" Jan 29 12:47:38 crc kubenswrapper[4852]: E0129 12:47:38.456526 4852 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podabe24ba2_2e8c_4d52_87ed_49c6cb6daabc.slice/crio-2f6a95b878b69893d1add12421b67e64d6d41396de22e92cd131c6de1f35f483\": RecentStats: unable to find data in memory cache]" Jan 29 12:47:43 crc kubenswrapper[4852]: E0129 12:47:43.507762 4852 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/154c51e11b9ceeb0fc82e906b4cba59ddad7a5a6df9b4eda26535c9b646907cd/diff" to get inode usage: stat /var/lib/containers/storage/overlay/154c51e11b9ceeb0fc82e906b4cba59ddad7a5a6df9b4eda26535c9b646907cd/diff: no such file or directory, extraDiskErr: Jan 29 12:48:39 crc kubenswrapper[4852]: I0129 12:48:39.786099 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="7d79020b-902b-425d-ab7d-d73c666d7582" containerID="36e4177740ce1f3ab577251144770849b9888682f7a0194f63f6488216388ef1" exitCode=0 Jan 29 12:48:39 crc kubenswrapper[4852]: I0129 12:48:39.786145 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" event={"ID":"7d79020b-902b-425d-ab7d-d73c666d7582","Type":"ContainerDied","Data":"36e4177740ce1f3ab577251144770849b9888682f7a0194f63f6488216388ef1"} Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.347014 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.410082 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ssh-key-openstack-cell1\") pod \"7d79020b-902b-425d-ab7d-d73c666d7582\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.410164 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfrtc\" (UniqueName: \"kubernetes.io/projected/7d79020b-902b-425d-ab7d-d73c666d7582-kube-api-access-sfrtc\") pod \"7d79020b-902b-425d-ab7d-d73c666d7582\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.410349 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-inventory\") pod \"7d79020b-902b-425d-ab7d-d73c666d7582\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.410385 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ceph\") pod \"7d79020b-902b-425d-ab7d-d73c666d7582\" (UID: \"7d79020b-902b-425d-ab7d-d73c666d7582\") " Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.418655 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ceph" (OuterVolumeSpecName: "ceph") pod "7d79020b-902b-425d-ab7d-d73c666d7582" (UID: "7d79020b-902b-425d-ab7d-d73c666d7582"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.418653 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d79020b-902b-425d-ab7d-d73c666d7582-kube-api-access-sfrtc" (OuterVolumeSpecName: "kube-api-access-sfrtc") pod "7d79020b-902b-425d-ab7d-d73c666d7582" (UID: "7d79020b-902b-425d-ab7d-d73c666d7582"). InnerVolumeSpecName "kube-api-access-sfrtc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.454775 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "7d79020b-902b-425d-ab7d-d73c666d7582" (UID: "7d79020b-902b-425d-ab7d-d73c666d7582"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.455167 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-inventory" (OuterVolumeSpecName: "inventory") pod "7d79020b-902b-425d-ab7d-d73c666d7582" (UID: "7d79020b-902b-425d-ab7d-d73c666d7582"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.513299 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.513346 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.513361 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfrtc\" (UniqueName: \"kubernetes.io/projected/7d79020b-902b-425d-ab7d-d73c666d7582-kube-api-access-sfrtc\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.513372 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d79020b-902b-425d-ab7d-d73c666d7582-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.811449 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" event={"ID":"7d79020b-902b-425d-ab7d-d73c666d7582","Type":"ContainerDied","Data":"17939dd03b7d8ce2e47f5353b4d591cdb2778a41d70b696b8acfd43fb9221d69"} Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.811493 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17939dd03b7d8ce2e47f5353b4d591cdb2778a41d70b696b8acfd43fb9221d69" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.811508 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-wrb7z" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.907759 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-kttsz"] Jan 29 12:48:41 crc kubenswrapper[4852]: E0129 12:48:41.908499 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="extract-utilities" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.908525 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="extract-utilities" Jan 29 12:48:41 crc kubenswrapper[4852]: E0129 12:48:41.908547 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="extract-content" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.908555 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="extract-content" Jan 29 12:48:41 crc kubenswrapper[4852]: E0129 12:48:41.908603 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="registry-server" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.908613 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="registry-server" Jan 29 12:48:41 crc kubenswrapper[4852]: E0129 12:48:41.908646 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d79020b-902b-425d-ab7d-d73c666d7582" containerName="configure-network-openstack-openstack-cell1" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.908655 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d79020b-902b-425d-ab7d-d73c666d7582" containerName="configure-network-openstack-openstack-cell1" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.908979 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="16e74c6f-c646-475a-8f42-61c0dd6dbc3a" containerName="registry-server" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.909016 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d79020b-902b-425d-ab7d-d73c666d7582" containerName="configure-network-openstack-openstack-cell1" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.910027 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.911807 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.917784 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.917977 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.918252 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:48:41 crc kubenswrapper[4852]: I0129 12:48:41.919340 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-kttsz"] Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.028991 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ceph\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.029088 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5hf4\" (UniqueName: \"kubernetes.io/projected/3d134483-206b-459e-86b2-5892ef691b64-kube-api-access-c5hf4\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.029161 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.029190 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-inventory\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.131126 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.131921 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-inventory\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " 
pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.132668 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ceph\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.133230 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5hf4\" (UniqueName: \"kubernetes.io/projected/3d134483-206b-459e-86b2-5892ef691b64-kube-api-access-c5hf4\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.136699 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-inventory\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.137751 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.138031 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ceph\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.149789 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5hf4\" (UniqueName: \"kubernetes.io/projected/3d134483-206b-459e-86b2-5892ef691b64-kube-api-access-c5hf4\") pod \"validate-network-openstack-openstack-cell1-kttsz\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.238837 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.834630 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-kttsz"] Jan 29 12:48:42 crc kubenswrapper[4852]: I0129 12:48:42.842443 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:48:43 crc kubenswrapper[4852]: I0129 12:48:43.520961 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:48:43 crc kubenswrapper[4852]: I0129 12:48:43.831438 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" event={"ID":"3d134483-206b-459e-86b2-5892ef691b64","Type":"ContainerStarted","Data":"5d3be480cf314644bf6730e1455d13c76e0f918a4ddef9bad5cacbe66fe07b22"} Jan 29 12:48:43 crc kubenswrapper[4852]: I0129 12:48:43.831913 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" event={"ID":"3d134483-206b-459e-86b2-5892ef691b64","Type":"ContainerStarted","Data":"721438d57aff7793172b3bfc2663a0b141b46eb68ca5ade47ac08134a4b2e5ad"} Jan 29 12:48:48 crc kubenswrapper[4852]: I0129 12:48:48.885783 4852 generic.go:334] "Generic (PLEG): container finished" podID="3d134483-206b-459e-86b2-5892ef691b64" containerID="5d3be480cf314644bf6730e1455d13c76e0f918a4ddef9bad5cacbe66fe07b22" exitCode=0 Jan 29 12:48:48 crc kubenswrapper[4852]: I0129 12:48:48.885867 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" event={"ID":"3d134483-206b-459e-86b2-5892ef691b64","Type":"ContainerDied","Data":"5d3be480cf314644bf6730e1455d13c76e0f918a4ddef9bad5cacbe66fe07b22"} Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.447989 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.535369 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ceph\") pod \"3d134483-206b-459e-86b2-5892ef691b64\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.535435 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5hf4\" (UniqueName: \"kubernetes.io/projected/3d134483-206b-459e-86b2-5892ef691b64-kube-api-access-c5hf4\") pod \"3d134483-206b-459e-86b2-5892ef691b64\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.535465 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ssh-key-openstack-cell1\") pod \"3d134483-206b-459e-86b2-5892ef691b64\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.535506 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-inventory\") pod \"3d134483-206b-459e-86b2-5892ef691b64\" (UID: \"3d134483-206b-459e-86b2-5892ef691b64\") " Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.540796 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ceph" (OuterVolumeSpecName: "ceph") pod "3d134483-206b-459e-86b2-5892ef691b64" (UID: "3d134483-206b-459e-86b2-5892ef691b64"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.541868 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d134483-206b-459e-86b2-5892ef691b64-kube-api-access-c5hf4" (OuterVolumeSpecName: "kube-api-access-c5hf4") pod "3d134483-206b-459e-86b2-5892ef691b64" (UID: "3d134483-206b-459e-86b2-5892ef691b64"). InnerVolumeSpecName "kube-api-access-c5hf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.562373 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "3d134483-206b-459e-86b2-5892ef691b64" (UID: "3d134483-206b-459e-86b2-5892ef691b64"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.564368 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-inventory" (OuterVolumeSpecName: "inventory") pod "3d134483-206b-459e-86b2-5892ef691b64" (UID: "3d134483-206b-459e-86b2-5892ef691b64"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.638225 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.638280 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5hf4\" (UniqueName: \"kubernetes.io/projected/3d134483-206b-459e-86b2-5892ef691b64-kube-api-access-c5hf4\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.638292 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.638302 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3d134483-206b-459e-86b2-5892ef691b64-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.908652 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" event={"ID":"3d134483-206b-459e-86b2-5892ef691b64","Type":"ContainerDied","Data":"721438d57aff7793172b3bfc2663a0b141b46eb68ca5ade47ac08134a4b2e5ad"} Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.908700 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="721438d57aff7793172b3bfc2663a0b141b46eb68ca5ade47ac08134a4b2e5ad" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.908770 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-kttsz" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.996886 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-v5scc"] Jan 29 12:48:50 crc kubenswrapper[4852]: E0129 12:48:50.997850 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d134483-206b-459e-86b2-5892ef691b64" containerName="validate-network-openstack-openstack-cell1" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.997877 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d134483-206b-459e-86b2-5892ef691b64" containerName="validate-network-openstack-openstack-cell1" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.998128 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d134483-206b-459e-86b2-5892ef691b64" containerName="validate-network-openstack-openstack-cell1" Jan 29 12:48:50 crc kubenswrapper[4852]: I0129 12:48:50.999038 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.010175 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.010237 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.010175 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.010476 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.024240 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-v5scc"] Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.050622 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvwrv\" (UniqueName: \"kubernetes.io/projected/af8a99a6-9c2a-4944-8ec6-4817ebfde889-kube-api-access-pvwrv\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.050681 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ceph\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.050719 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.050800 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-inventory\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.153482 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-inventory\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.153722 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvwrv\" (UniqueName: \"kubernetes.io/projected/af8a99a6-9c2a-4944-8ec6-4817ebfde889-kube-api-access-pvwrv\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc 
kubenswrapper[4852]: I0129 12:48:51.153776 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ceph\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.153831 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.158308 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ceph\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.158477 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-inventory\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.165218 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.172828 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvwrv\" (UniqueName: \"kubernetes.io/projected/af8a99a6-9c2a-4944-8ec6-4817ebfde889-kube-api-access-pvwrv\") pod \"install-os-openstack-openstack-cell1-v5scc\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.321105 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.847749 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-v5scc"] Jan 29 12:48:51 crc kubenswrapper[4852]: W0129 12:48:51.852813 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf8a99a6_9c2a_4944_8ec6_4817ebfde889.slice/crio-7ac6e88fb9f92a8bfb9caf454efb618c35ccd22783a7d0fc93775167ccd26c90 WatchSource:0}: Error finding container 7ac6e88fb9f92a8bfb9caf454efb618c35ccd22783a7d0fc93775167ccd26c90: Status 404 returned error can't find the container with id 7ac6e88fb9f92a8bfb9caf454efb618c35ccd22783a7d0fc93775167ccd26c90 Jan 29 12:48:51 crc kubenswrapper[4852]: I0129 12:48:51.920773 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v5scc" event={"ID":"af8a99a6-9c2a-4944-8ec6-4817ebfde889","Type":"ContainerStarted","Data":"7ac6e88fb9f92a8bfb9caf454efb618c35ccd22783a7d0fc93775167ccd26c90"} Jan 29 12:48:52 crc kubenswrapper[4852]: I0129 12:48:52.934509 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v5scc" event={"ID":"af8a99a6-9c2a-4944-8ec6-4817ebfde889","Type":"ContainerStarted","Data":"d4b7c0c2a0887799093a8753b1085c7eccdfba90917bc5f7edce5b9f39a98a29"} Jan 29 12:48:52 crc kubenswrapper[4852]: I0129 12:48:52.953828 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-v5scc" podStartSLOduration=2.542146272 podStartE2EDuration="2.953808128s" podCreationTimestamp="2026-01-29 12:48:50 +0000 UTC" firstStartedPulling="2026-01-29 12:48:51.855128831 +0000 UTC m=+7629.072459975" lastFinishedPulling="2026-01-29 12:48:52.266790697 +0000 UTC m=+7629.484121831" observedRunningTime="2026-01-29 12:48:52.949052192 +0000 UTC m=+7630.166383326" watchObservedRunningTime="2026-01-29 12:48:52.953808128 +0000 UTC m=+7630.171139262" Jan 29 12:49:30 crc kubenswrapper[4852]: I0129 12:49:30.016530 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:49:30 crc kubenswrapper[4852]: I0129 12:49:30.017119 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:49:38 crc kubenswrapper[4852]: I0129 12:49:38.371888 4852 generic.go:334] "Generic (PLEG): container finished" podID="af8a99a6-9c2a-4944-8ec6-4817ebfde889" containerID="d4b7c0c2a0887799093a8753b1085c7eccdfba90917bc5f7edce5b9f39a98a29" exitCode=0 Jan 29 12:49:38 crc kubenswrapper[4852]: I0129 12:49:38.371976 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v5scc" event={"ID":"af8a99a6-9c2a-4944-8ec6-4817ebfde889","Type":"ContainerDied","Data":"d4b7c0c2a0887799093a8753b1085c7eccdfba90917bc5f7edce5b9f39a98a29"} Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.821470 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.937292 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ssh-key-openstack-cell1\") pod \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.937388 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ceph\") pod \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.937415 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvwrv\" (UniqueName: \"kubernetes.io/projected/af8a99a6-9c2a-4944-8ec6-4817ebfde889-kube-api-access-pvwrv\") pod \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.937517 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-inventory\") pod \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\" (UID: \"af8a99a6-9c2a-4944-8ec6-4817ebfde889\") " Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.944704 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ceph" (OuterVolumeSpecName: "ceph") pod "af8a99a6-9c2a-4944-8ec6-4817ebfde889" (UID: "af8a99a6-9c2a-4944-8ec6-4817ebfde889"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.944782 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af8a99a6-9c2a-4944-8ec6-4817ebfde889-kube-api-access-pvwrv" (OuterVolumeSpecName: "kube-api-access-pvwrv") pod "af8a99a6-9c2a-4944-8ec6-4817ebfde889" (UID: "af8a99a6-9c2a-4944-8ec6-4817ebfde889"). InnerVolumeSpecName "kube-api-access-pvwrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.968176 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "af8a99a6-9c2a-4944-8ec6-4817ebfde889" (UID: "af8a99a6-9c2a-4944-8ec6-4817ebfde889"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:49:39 crc kubenswrapper[4852]: I0129 12:49:39.969893 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-inventory" (OuterVolumeSpecName: "inventory") pod "af8a99a6-9c2a-4944-8ec6-4817ebfde889" (UID: "af8a99a6-9c2a-4944-8ec6-4817ebfde889"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.043242 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.043283 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.043294 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvwrv\" (UniqueName: \"kubernetes.io/projected/af8a99a6-9c2a-4944-8ec6-4817ebfde889-kube-api-access-pvwrv\") on node \"crc\" DevicePath \"\"" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.043305 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af8a99a6-9c2a-4944-8ec6-4817ebfde889-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.392780 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v5scc" event={"ID":"af8a99a6-9c2a-4944-8ec6-4817ebfde889","Type":"ContainerDied","Data":"7ac6e88fb9f92a8bfb9caf454efb618c35ccd22783a7d0fc93775167ccd26c90"} Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.392825 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ac6e88fb9f92a8bfb9caf454efb618c35ccd22783a7d0fc93775167ccd26c90" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.392851 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v5scc" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.490951 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-zw2j2"] Jan 29 12:49:40 crc kubenswrapper[4852]: E0129 12:49:40.491957 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af8a99a6-9c2a-4944-8ec6-4817ebfde889" containerName="install-os-openstack-openstack-cell1" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.491985 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="af8a99a6-9c2a-4944-8ec6-4817ebfde889" containerName="install-os-openstack-openstack-cell1" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.492253 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="af8a99a6-9c2a-4944-8ec6-4817ebfde889" containerName="install-os-openstack-openstack-cell1" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.493489 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.496671 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.497708 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.499956 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.500705 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.507571 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-zw2j2"] Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.653973 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.654134 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ceph\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.654156 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-inventory\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.654209 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8zz4\" (UniqueName: \"kubernetes.io/projected/42cffd93-f8fd-4c04-b766-98fc38dcae2e-kube-api-access-f8zz4\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.757649 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8zz4\" (UniqueName: \"kubernetes.io/projected/42cffd93-f8fd-4c04-b766-98fc38dcae2e-kube-api-access-f8zz4\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.758660 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " 
pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.759067 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ceph\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.759311 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-inventory\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.763941 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ceph\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.764021 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-inventory\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.766346 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.796573 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8zz4\" (UniqueName: \"kubernetes.io/projected/42cffd93-f8fd-4c04-b766-98fc38dcae2e-kube-api-access-f8zz4\") pod \"configure-os-openstack-openstack-cell1-zw2j2\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:40 crc kubenswrapper[4852]: I0129 12:49:40.861547 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:49:41 crc kubenswrapper[4852]: I0129 12:49:41.485967 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-zw2j2"] Jan 29 12:49:42 crc kubenswrapper[4852]: I0129 12:49:42.412030 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" event={"ID":"42cffd93-f8fd-4c04-b766-98fc38dcae2e","Type":"ContainerStarted","Data":"54178d224d2737821b80309f8bc3e1f58293543bec5930fe5c784806d16375a6"} Jan 29 12:49:43 crc kubenswrapper[4852]: I0129 12:49:43.422899 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" event={"ID":"42cffd93-f8fd-4c04-b766-98fc38dcae2e","Type":"ContainerStarted","Data":"c551e0f9cd2707ee1b8264fe4b5b19eac3af6df32d527f79d81d43ca05549da9"} Jan 29 12:49:43 crc kubenswrapper[4852]: I0129 12:49:43.453154 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" podStartSLOduration=2.633665682 podStartE2EDuration="3.453130238s" podCreationTimestamp="2026-01-29 12:49:40 +0000 UTC" firstStartedPulling="2026-01-29 12:49:41.459792793 +0000 UTC m=+7678.677123927" lastFinishedPulling="2026-01-29 12:49:42.279257349 +0000 UTC m=+7679.496588483" observedRunningTime="2026-01-29 12:49:43.442774436 +0000 UTC m=+7680.660105570" watchObservedRunningTime="2026-01-29 12:49:43.453130238 +0000 UTC m=+7680.670461372" Jan 29 12:50:00 crc kubenswrapper[4852]: I0129 12:50:00.017861 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:50:00 crc kubenswrapper[4852]: I0129 12:50:00.018330 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.180189 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8c65d"] Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.185978 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.196559 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8c65d"] Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.249195 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84dlv\" (UniqueName: \"kubernetes.io/projected/cf0d7b61-493f-446c-86d0-6991a325ff56-kube-api-access-84dlv\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.249253 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-utilities\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.249364 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-catalog-content\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.351122 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84dlv\" (UniqueName: \"kubernetes.io/projected/cf0d7b61-493f-446c-86d0-6991a325ff56-kube-api-access-84dlv\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.351216 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-utilities\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.351387 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-catalog-content\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.351866 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-utilities\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.351975 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-catalog-content\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.375315 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-84dlv\" (UniqueName: \"kubernetes.io/projected/cf0d7b61-493f-446c-86d0-6991a325ff56-kube-api-access-84dlv\") pod \"certified-operators-8c65d\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:02 crc kubenswrapper[4852]: I0129 12:50:02.506058 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:03 crc kubenswrapper[4852]: W0129 12:50:03.072328 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf0d7b61_493f_446c_86d0_6991a325ff56.slice/crio-ff3759e96ff3fcd9215eb256b5f8f5000952420c9b3e6e9492554e7d9dcdc88a WatchSource:0}: Error finding container ff3759e96ff3fcd9215eb256b5f8f5000952420c9b3e6e9492554e7d9dcdc88a: Status 404 returned error can't find the container with id ff3759e96ff3fcd9215eb256b5f8f5000952420c9b3e6e9492554e7d9dcdc88a Jan 29 12:50:03 crc kubenswrapper[4852]: I0129 12:50:03.083920 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8c65d"] Jan 29 12:50:03 crc kubenswrapper[4852]: I0129 12:50:03.626084 4852 generic.go:334] "Generic (PLEG): container finished" podID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerID="9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3" exitCode=0 Jan 29 12:50:03 crc kubenswrapper[4852]: I0129 12:50:03.626169 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerDied","Data":"9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3"} Jan 29 12:50:03 crc kubenswrapper[4852]: I0129 12:50:03.626465 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerStarted","Data":"ff3759e96ff3fcd9215eb256b5f8f5000952420c9b3e6e9492554e7d9dcdc88a"} Jan 29 12:50:07 crc kubenswrapper[4852]: I0129 12:50:07.667133 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerStarted","Data":"1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8"} Jan 29 12:50:09 crc kubenswrapper[4852]: I0129 12:50:09.685756 4852 generic.go:334] "Generic (PLEG): container finished" podID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerID="1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8" exitCode=0 Jan 29 12:50:09 crc kubenswrapper[4852]: I0129 12:50:09.685855 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerDied","Data":"1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8"} Jan 29 12:50:10 crc kubenswrapper[4852]: I0129 12:50:10.700999 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerStarted","Data":"0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab"} Jan 29 12:50:10 crc kubenswrapper[4852]: I0129 12:50:10.725214 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8c65d" 
podStartSLOduration=2.23008818 podStartE2EDuration="8.725193601s" podCreationTimestamp="2026-01-29 12:50:02 +0000 UTC" firstStartedPulling="2026-01-29 12:50:03.627835133 +0000 UTC m=+7700.845166267" lastFinishedPulling="2026-01-29 12:50:10.122940534 +0000 UTC m=+7707.340271688" observedRunningTime="2026-01-29 12:50:10.718940439 +0000 UTC m=+7707.936271583" watchObservedRunningTime="2026-01-29 12:50:10.725193601 +0000 UTC m=+7707.942524745" Jan 29 12:50:12 crc kubenswrapper[4852]: I0129 12:50:12.506254 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:12 crc kubenswrapper[4852]: I0129 12:50:12.506798 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:13 crc kubenswrapper[4852]: I0129 12:50:13.567147 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-8c65d" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="registry-server" probeResult="failure" output=< Jan 29 12:50:13 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:50:13 crc kubenswrapper[4852]: > Jan 29 12:50:22 crc kubenswrapper[4852]: I0129 12:50:22.572733 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:22 crc kubenswrapper[4852]: I0129 12:50:22.638484 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:22 crc kubenswrapper[4852]: I0129 12:50:22.827142 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8c65d"] Jan 29 12:50:23 crc kubenswrapper[4852]: I0129 12:50:23.838393 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8c65d" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="registry-server" containerID="cri-o://0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab" gracePeriod=2 Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.373783 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.485865 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-catalog-content\") pod \"cf0d7b61-493f-446c-86d0-6991a325ff56\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.485943 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-utilities\") pod \"cf0d7b61-493f-446c-86d0-6991a325ff56\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.485980 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84dlv\" (UniqueName: \"kubernetes.io/projected/cf0d7b61-493f-446c-86d0-6991a325ff56-kube-api-access-84dlv\") pod \"cf0d7b61-493f-446c-86d0-6991a325ff56\" (UID: \"cf0d7b61-493f-446c-86d0-6991a325ff56\") " Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.486870 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-utilities" (OuterVolumeSpecName: "utilities") pod "cf0d7b61-493f-446c-86d0-6991a325ff56" (UID: "cf0d7b61-493f-446c-86d0-6991a325ff56"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.487199 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.492725 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf0d7b61-493f-446c-86d0-6991a325ff56-kube-api-access-84dlv" (OuterVolumeSpecName: "kube-api-access-84dlv") pod "cf0d7b61-493f-446c-86d0-6991a325ff56" (UID: "cf0d7b61-493f-446c-86d0-6991a325ff56"). InnerVolumeSpecName "kube-api-access-84dlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.530295 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf0d7b61-493f-446c-86d0-6991a325ff56" (UID: "cf0d7b61-493f-446c-86d0-6991a325ff56"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.589636 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf0d7b61-493f-446c-86d0-6991a325ff56-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.589668 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84dlv\" (UniqueName: \"kubernetes.io/projected/cf0d7b61-493f-446c-86d0-6991a325ff56-kube-api-access-84dlv\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.852146 4852 generic.go:334] "Generic (PLEG): container finished" podID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerID="0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab" exitCode=0 Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.852199 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8c65d" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.852220 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerDied","Data":"0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab"} Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.852923 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8c65d" event={"ID":"cf0d7b61-493f-446c-86d0-6991a325ff56","Type":"ContainerDied","Data":"ff3759e96ff3fcd9215eb256b5f8f5000952420c9b3e6e9492554e7d9dcdc88a"} Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.852946 4852 scope.go:117] "RemoveContainer" containerID="0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.881798 4852 scope.go:117] "RemoveContainer" containerID="1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.899144 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8c65d"] Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.908962 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8c65d"] Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.923006 4852 scope.go:117] "RemoveContainer" containerID="9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.976870 4852 scope.go:117] "RemoveContainer" containerID="0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab" Jan 29 12:50:24 crc kubenswrapper[4852]: E0129 12:50:24.977363 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab\": container with ID starting with 0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab not found: ID does not exist" containerID="0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.977420 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab"} err="failed to get container status 
\"0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab\": rpc error: code = NotFound desc = could not find container \"0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab\": container with ID starting with 0ddef733ce55249514f2403f067152588ab1fe8a0b4c3f4a3336125b405be8ab not found: ID does not exist" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.977452 4852 scope.go:117] "RemoveContainer" containerID="1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8" Jan 29 12:50:24 crc kubenswrapper[4852]: E0129 12:50:24.978315 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8\": container with ID starting with 1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8 not found: ID does not exist" containerID="1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.978368 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8"} err="failed to get container status \"1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8\": rpc error: code = NotFound desc = could not find container \"1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8\": container with ID starting with 1524c8cb7a3be8333f07aeee2fc7ff9b7f270664237f6820fcca554ddd28a5e8 not found: ID does not exist" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.978402 4852 scope.go:117] "RemoveContainer" containerID="9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3" Jan 29 12:50:24 crc kubenswrapper[4852]: E0129 12:50:24.978902 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3\": container with ID starting with 9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3 not found: ID does not exist" containerID="9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3" Jan 29 12:50:24 crc kubenswrapper[4852]: I0129 12:50:24.978938 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3"} err="failed to get container status \"9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3\": rpc error: code = NotFound desc = could not find container \"9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3\": container with ID starting with 9ec3220589b27a12c25712d1736ddff741e6f788e1a8dde6960be7ee02bc64c3 not found: ID does not exist" Jan 29 12:50:25 crc kubenswrapper[4852]: I0129 12:50:25.477697 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" path="/var/lib/kubelet/pods/cf0d7b61-493f-446c-86d0-6991a325ff56/volumes" Jan 29 12:50:25 crc kubenswrapper[4852]: I0129 12:50:25.865380 4852 generic.go:334] "Generic (PLEG): container finished" podID="42cffd93-f8fd-4c04-b766-98fc38dcae2e" containerID="c551e0f9cd2707ee1b8264fe4b5b19eac3af6df32d527f79d81d43ca05549da9" exitCode=0 Jan 29 12:50:25 crc kubenswrapper[4852]: I0129 12:50:25.865471 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" 
event={"ID":"42cffd93-f8fd-4c04-b766-98fc38dcae2e","Type":"ContainerDied","Data":"c551e0f9cd2707ee1b8264fe4b5b19eac3af6df32d527f79d81d43ca05549da9"} Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.335928 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.462501 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ssh-key-openstack-cell1\") pod \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.462663 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8zz4\" (UniqueName: \"kubernetes.io/projected/42cffd93-f8fd-4c04-b766-98fc38dcae2e-kube-api-access-f8zz4\") pod \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.462693 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-inventory\") pod \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.462756 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ceph\") pod \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\" (UID: \"42cffd93-f8fd-4c04-b766-98fc38dcae2e\") " Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.469825 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ceph" (OuterVolumeSpecName: "ceph") pod "42cffd93-f8fd-4c04-b766-98fc38dcae2e" (UID: "42cffd93-f8fd-4c04-b766-98fc38dcae2e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.476991 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42cffd93-f8fd-4c04-b766-98fc38dcae2e-kube-api-access-f8zz4" (OuterVolumeSpecName: "kube-api-access-f8zz4") pod "42cffd93-f8fd-4c04-b766-98fc38dcae2e" (UID: "42cffd93-f8fd-4c04-b766-98fc38dcae2e"). InnerVolumeSpecName "kube-api-access-f8zz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.490775 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-inventory" (OuterVolumeSpecName: "inventory") pod "42cffd93-f8fd-4c04-b766-98fc38dcae2e" (UID: "42cffd93-f8fd-4c04-b766-98fc38dcae2e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.501298 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "42cffd93-f8fd-4c04-b766-98fc38dcae2e" (UID: "42cffd93-f8fd-4c04-b766-98fc38dcae2e"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.572188 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8zz4\" (UniqueName: \"kubernetes.io/projected/42cffd93-f8fd-4c04-b766-98fc38dcae2e-kube-api-access-f8zz4\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.572231 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.572242 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.572253 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/42cffd93-f8fd-4c04-b766-98fc38dcae2e-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.888419 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" event={"ID":"42cffd93-f8fd-4c04-b766-98fc38dcae2e","Type":"ContainerDied","Data":"54178d224d2737821b80309f8bc3e1f58293543bec5930fe5c784806d16375a6"} Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.888751 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54178d224d2737821b80309f8bc3e1f58293543bec5930fe5c784806d16375a6" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.888496 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-zw2j2" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.990751 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-x6l6b"] Jan 29 12:50:27 crc kubenswrapper[4852]: E0129 12:50:27.991226 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="registry-server" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.991243 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="registry-server" Jan 29 12:50:27 crc kubenswrapper[4852]: E0129 12:50:27.991263 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42cffd93-f8fd-4c04-b766-98fc38dcae2e" containerName="configure-os-openstack-openstack-cell1" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.991270 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="42cffd93-f8fd-4c04-b766-98fc38dcae2e" containerName="configure-os-openstack-openstack-cell1" Jan 29 12:50:27 crc kubenswrapper[4852]: E0129 12:50:27.991286 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="extract-content" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.991292 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="extract-content" Jan 29 12:50:27 crc kubenswrapper[4852]: E0129 12:50:27.991320 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="extract-utilities" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.991327 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="extract-utilities" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.991553 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf0d7b61-493f-446c-86d0-6991a325ff56" containerName="registry-server" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.991619 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="42cffd93-f8fd-4c04-b766-98fc38dcae2e" containerName="configure-os-openstack-openstack-cell1" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.992423 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.995078 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.995475 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.996465 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:50:27 crc kubenswrapper[4852]: I0129 12:50:27.997076 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.002718 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-x6l6b"] Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.083414 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ceph\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.083491 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-inventory-0\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.084294 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.084522 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsr9c\" (UniqueName: \"kubernetes.io/projected/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-kube-api-access-tsr9c\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.186788 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.186880 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsr9c\" (UniqueName: \"kubernetes.io/projected/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-kube-api-access-tsr9c\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.186907 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ceph\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ceph\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.186927 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-inventory-0\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.197226 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-inventory-0\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.198234 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.198415 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ceph\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.211626 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsr9c\" (UniqueName: \"kubernetes.io/projected/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-kube-api-access-tsr9c\") pod \"ssh-known-hosts-openstack-x6l6b\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.309983 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.852130 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-x6l6b"] Jan 29 12:50:28 crc kubenswrapper[4852]: I0129 12:50:28.899634 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-x6l6b" event={"ID":"d7e6ad57-a997-4ce9-989d-c5df67c43fb5","Type":"ContainerStarted","Data":"f5bd4ae39893e3c14ac4649e1a11f7b26541fc10df68c503e6f4383bbae43989"} Jan 29 12:50:29 crc kubenswrapper[4852]: I0129 12:50:29.911929 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-x6l6b" event={"ID":"d7e6ad57-a997-4ce9-989d-c5df67c43fb5","Type":"ContainerStarted","Data":"d31f38ba6ab949e0d8d15297ecca0a8b04ed3a9723af11b0b84402097acb9e92"} Jan 29 12:50:29 crc kubenswrapper[4852]: I0129 12:50:29.933903 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-x6l6b" podStartSLOduration=2.492319126 podStartE2EDuration="2.93387864s" podCreationTimestamp="2026-01-29 12:50:27 +0000 UTC" firstStartedPulling="2026-01-29 12:50:28.860121389 +0000 UTC m=+7726.077452523" lastFinishedPulling="2026-01-29 12:50:29.301680903 +0000 UTC m=+7726.519012037" observedRunningTime="2026-01-29 12:50:29.928537289 +0000 UTC m=+7727.145868443" watchObservedRunningTime="2026-01-29 12:50:29.93387864 +0000 UTC m=+7727.151209784" Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.017267 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.017317 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.017357 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.018130 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.018184 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" gracePeriod=600 Jan 29 12:50:30 crc kubenswrapper[4852]: E0129 12:50:30.150471 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.924813 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" exitCode=0 Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.924973 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35"} Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.925188 4852 scope.go:117] "RemoveContainer" containerID="19fb261519b90fb978d167b76c52580c164aaf115fa1a929bdbf476e65b0a58f" Jan 29 12:50:30 crc kubenswrapper[4852]: I0129 12:50:30.926081 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:50:30 crc kubenswrapper[4852]: E0129 12:50:30.926729 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:50:38 crc kubenswrapper[4852]: I0129 12:50:38.056536 4852 generic.go:334] "Generic (PLEG): container finished" podID="d7e6ad57-a997-4ce9-989d-c5df67c43fb5" containerID="d31f38ba6ab949e0d8d15297ecca0a8b04ed3a9723af11b0b84402097acb9e92" exitCode=0 Jan 29 12:50:38 crc kubenswrapper[4852]: I0129 12:50:38.056610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-x6l6b" event={"ID":"d7e6ad57-a997-4ce9-989d-c5df67c43fb5","Type":"ContainerDied","Data":"d31f38ba6ab949e0d8d15297ecca0a8b04ed3a9723af11b0b84402097acb9e92"} Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.572801 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.662953 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsr9c\" (UniqueName: \"kubernetes.io/projected/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-kube-api-access-tsr9c\") pod \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.663434 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-inventory-0\") pod \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.664022 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ssh-key-openstack-cell1\") pod \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.664147 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ceph\") pod \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\" (UID: \"d7e6ad57-a997-4ce9-989d-c5df67c43fb5\") " Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.668686 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-kube-api-access-tsr9c" (OuterVolumeSpecName: "kube-api-access-tsr9c") pod "d7e6ad57-a997-4ce9-989d-c5df67c43fb5" (UID: "d7e6ad57-a997-4ce9-989d-c5df67c43fb5"). InnerVolumeSpecName "kube-api-access-tsr9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.669657 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ceph" (OuterVolumeSpecName: "ceph") pod "d7e6ad57-a997-4ce9-989d-c5df67c43fb5" (UID: "d7e6ad57-a997-4ce9-989d-c5df67c43fb5"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.699553 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "d7e6ad57-a997-4ce9-989d-c5df67c43fb5" (UID: "d7e6ad57-a997-4ce9-989d-c5df67c43fb5"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.706698 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "d7e6ad57-a997-4ce9-989d-c5df67c43fb5" (UID: "d7e6ad57-a997-4ce9-989d-c5df67c43fb5"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.767562 4852 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.767812 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.767954 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:39 crc kubenswrapper[4852]: I0129 12:50:39.768044 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsr9c\" (UniqueName: \"kubernetes.io/projected/d7e6ad57-a997-4ce9-989d-c5df67c43fb5-kube-api-access-tsr9c\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.078420 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-x6l6b" event={"ID":"d7e6ad57-a997-4ce9-989d-c5df67c43fb5","Type":"ContainerDied","Data":"f5bd4ae39893e3c14ac4649e1a11f7b26541fc10df68c503e6f4383bbae43989"} Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.078873 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5bd4ae39893e3c14ac4649e1a11f7b26541fc10df68c503e6f4383bbae43989" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.078932 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-x6l6b" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.150434 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-sqrzf"] Jan 29 12:50:40 crc kubenswrapper[4852]: E0129 12:50:40.151237 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7e6ad57-a997-4ce9-989d-c5df67c43fb5" containerName="ssh-known-hosts-openstack" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.151259 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7e6ad57-a997-4ce9-989d-c5df67c43fb5" containerName="ssh-known-hosts-openstack" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.151573 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7e6ad57-a997-4ce9-989d-c5df67c43fb5" containerName="ssh-known-hosts-openstack" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.152693 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.156604 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.156674 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.156836 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.156902 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.165406 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-sqrzf"] Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.282997 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-inventory\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.283048 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcfpd\" (UniqueName: \"kubernetes.io/projected/7f8604b9-a876-4028-a7a1-edccc04598b7-kube-api-access-lcfpd\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.283086 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ssh-key-openstack-cell1\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.283392 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ceph\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.385884 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-inventory\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.385950 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcfpd\" (UniqueName: \"kubernetes.io/projected/7f8604b9-a876-4028-a7a1-edccc04598b7-kube-api-access-lcfpd\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.386006 4852 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ssh-key-openstack-cell1\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.386111 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ceph\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.390260 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-inventory\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.390349 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ceph\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.390494 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ssh-key-openstack-cell1\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.408399 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcfpd\" (UniqueName: \"kubernetes.io/projected/7f8604b9-a876-4028-a7a1-edccc04598b7-kube-api-access-lcfpd\") pod \"run-os-openstack-openstack-cell1-sqrzf\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:40 crc kubenswrapper[4852]: I0129 12:50:40.475839 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:41 crc kubenswrapper[4852]: I0129 12:50:41.016140 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-sqrzf"] Jan 29 12:50:41 crc kubenswrapper[4852]: I0129 12:50:41.088624 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" event={"ID":"7f8604b9-a876-4028-a7a1-edccc04598b7","Type":"ContainerStarted","Data":"bf5b3f6e08a2c4460b1acecfabe7de7e3dfc572aab27b817c819d28998453c9e"} Jan 29 12:50:41 crc kubenswrapper[4852]: I0129 12:50:41.464769 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:50:41 crc kubenswrapper[4852]: E0129 12:50:41.465642 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:50:42 crc kubenswrapper[4852]: I0129 12:50:42.100847 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" event={"ID":"7f8604b9-a876-4028-a7a1-edccc04598b7","Type":"ContainerStarted","Data":"05a53e8b453f0c51b10054b428681a2d562d7561aa77fe09eddc9967b40c86b8"} Jan 29 12:50:42 crc kubenswrapper[4852]: I0129 12:50:42.121990 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" podStartSLOduration=1.727914132 podStartE2EDuration="2.121968278s" podCreationTimestamp="2026-01-29 12:50:40 +0000 UTC" firstStartedPulling="2026-01-29 12:50:41.021359114 +0000 UTC m=+7738.238690258" lastFinishedPulling="2026-01-29 12:50:41.41541326 +0000 UTC m=+7738.632744404" observedRunningTime="2026-01-29 12:50:42.116321971 +0000 UTC m=+7739.333653125" watchObservedRunningTime="2026-01-29 12:50:42.121968278 +0000 UTC m=+7739.339299412" Jan 29 12:50:50 crc kubenswrapper[4852]: I0129 12:50:50.180820 4852 generic.go:334] "Generic (PLEG): container finished" podID="7f8604b9-a876-4028-a7a1-edccc04598b7" containerID="05a53e8b453f0c51b10054b428681a2d562d7561aa77fe09eddc9967b40c86b8" exitCode=0 Jan 29 12:50:50 crc kubenswrapper[4852]: I0129 12:50:50.181026 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" event={"ID":"7f8604b9-a876-4028-a7a1-edccc04598b7","Type":"ContainerDied","Data":"05a53e8b453f0c51b10054b428681a2d562d7561aa77fe09eddc9967b40c86b8"} Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.632834 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.737799 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ceph\") pod \"7f8604b9-a876-4028-a7a1-edccc04598b7\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.737955 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-inventory\") pod \"7f8604b9-a876-4028-a7a1-edccc04598b7\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.738040 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ssh-key-openstack-cell1\") pod \"7f8604b9-a876-4028-a7a1-edccc04598b7\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.738083 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcfpd\" (UniqueName: \"kubernetes.io/projected/7f8604b9-a876-4028-a7a1-edccc04598b7-kube-api-access-lcfpd\") pod \"7f8604b9-a876-4028-a7a1-edccc04598b7\" (UID: \"7f8604b9-a876-4028-a7a1-edccc04598b7\") " Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.743590 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ceph" (OuterVolumeSpecName: "ceph") pod "7f8604b9-a876-4028-a7a1-edccc04598b7" (UID: "7f8604b9-a876-4028-a7a1-edccc04598b7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.744739 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f8604b9-a876-4028-a7a1-edccc04598b7-kube-api-access-lcfpd" (OuterVolumeSpecName: "kube-api-access-lcfpd") pod "7f8604b9-a876-4028-a7a1-edccc04598b7" (UID: "7f8604b9-a876-4028-a7a1-edccc04598b7"). InnerVolumeSpecName "kube-api-access-lcfpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.766205 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-inventory" (OuterVolumeSpecName: "inventory") pod "7f8604b9-a876-4028-a7a1-edccc04598b7" (UID: "7f8604b9-a876-4028-a7a1-edccc04598b7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.786359 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "7f8604b9-a876-4028-a7a1-edccc04598b7" (UID: "7f8604b9-a876-4028-a7a1-edccc04598b7"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.841017 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.841051 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcfpd\" (UniqueName: \"kubernetes.io/projected/7f8604b9-a876-4028-a7a1-edccc04598b7-kube-api-access-lcfpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.841061 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:51 crc kubenswrapper[4852]: I0129 12:50:51.841072 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8604b9-a876-4028-a7a1-edccc04598b7-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.206613 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" event={"ID":"7f8604b9-a876-4028-a7a1-edccc04598b7","Type":"ContainerDied","Data":"bf5b3f6e08a2c4460b1acecfabe7de7e3dfc572aab27b817c819d28998453c9e"} Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.207155 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf5b3f6e08a2c4460b1acecfabe7de7e3dfc572aab27b817c819d28998453c9e" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.206651 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-sqrzf" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.277927 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-p8ffg"] Jan 29 12:50:52 crc kubenswrapper[4852]: E0129 12:50:52.278531 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f8604b9-a876-4028-a7a1-edccc04598b7" containerName="run-os-openstack-openstack-cell1" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.278552 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f8604b9-a876-4028-a7a1-edccc04598b7" containerName="run-os-openstack-openstack-cell1" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.278883 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f8604b9-a876-4028-a7a1-edccc04598b7" containerName="run-os-openstack-openstack-cell1" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.279896 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.285337 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.285905 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.286187 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.288053 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-p8ffg"] Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.293774 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.351762 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-inventory\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.351859 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ceph\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.351897 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5kj2\" (UniqueName: \"kubernetes.io/projected/02fc8a61-1749-4d19-962f-daaa15a078f6-kube-api-access-n5kj2\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.351998 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ssh-key-openstack-cell1\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.454921 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ceph\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.455217 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5kj2\" (UniqueName: \"kubernetes.io/projected/02fc8a61-1749-4d19-962f-daaa15a078f6-kube-api-access-n5kj2\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 
12:50:52.455422 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ssh-key-openstack-cell1\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.456320 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-inventory\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.461522 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ssh-key-openstack-cell1\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.466950 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ceph\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.467068 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-inventory\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.471715 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5kj2\" (UniqueName: \"kubernetes.io/projected/02fc8a61-1749-4d19-962f-daaa15a078f6-kube-api-access-n5kj2\") pod \"reboot-os-openstack-openstack-cell1-p8ffg\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:52 crc kubenswrapper[4852]: I0129 12:50:52.622178 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:50:53 crc kubenswrapper[4852]: I0129 12:50:53.210778 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-p8ffg"] Jan 29 12:50:53 crc kubenswrapper[4852]: W0129 12:50:53.220299 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02fc8a61_1749_4d19_962f_daaa15a078f6.slice/crio-5b3c5a3edcf095e98d28ff868466dd96151f7b096152f18dd893d8b4612b427e WatchSource:0}: Error finding container 5b3c5a3edcf095e98d28ff868466dd96151f7b096152f18dd893d8b4612b427e: Status 404 returned error can't find the container with id 5b3c5a3edcf095e98d28ff868466dd96151f7b096152f18dd893d8b4612b427e Jan 29 12:50:54 crc kubenswrapper[4852]: I0129 12:50:54.228194 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" event={"ID":"02fc8a61-1749-4d19-962f-daaa15a078f6","Type":"ContainerStarted","Data":"3ba5106bef433ae86664f206fd43565577c3cc11970a7c137575a02abcdb8eee"} Jan 29 12:50:54 crc kubenswrapper[4852]: I0129 12:50:54.228641 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" event={"ID":"02fc8a61-1749-4d19-962f-daaa15a078f6","Type":"ContainerStarted","Data":"5b3c5a3edcf095e98d28ff868466dd96151f7b096152f18dd893d8b4612b427e"} Jan 29 12:50:54 crc kubenswrapper[4852]: I0129 12:50:54.257482 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" podStartSLOduration=1.514683905 podStartE2EDuration="2.257449815s" podCreationTimestamp="2026-01-29 12:50:52 +0000 UTC" firstStartedPulling="2026-01-29 12:50:53.222599242 +0000 UTC m=+7750.439930386" lastFinishedPulling="2026-01-29 12:50:53.965365162 +0000 UTC m=+7751.182696296" observedRunningTime="2026-01-29 12:50:54.247930933 +0000 UTC m=+7751.465262077" watchObservedRunningTime="2026-01-29 12:50:54.257449815 +0000 UTC m=+7751.474780949" Jan 29 12:50:55 crc kubenswrapper[4852]: I0129 12:50:55.463521 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:50:55 crc kubenswrapper[4852]: E0129 12:50:55.464280 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:51:09 crc kubenswrapper[4852]: I0129 12:51:09.463357 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:51:09 crc kubenswrapper[4852]: E0129 12:51:09.464146 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:51:10 crc kubenswrapper[4852]: I0129 12:51:10.411772 4852 generic.go:334] "Generic (PLEG): container 
finished" podID="02fc8a61-1749-4d19-962f-daaa15a078f6" containerID="3ba5106bef433ae86664f206fd43565577c3cc11970a7c137575a02abcdb8eee" exitCode=0 Jan 29 12:51:10 crc kubenswrapper[4852]: I0129 12:51:10.412208 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" event={"ID":"02fc8a61-1749-4d19-962f-daaa15a078f6","Type":"ContainerDied","Data":"3ba5106bef433ae86664f206fd43565577c3cc11970a7c137575a02abcdb8eee"} Jan 29 12:51:11 crc kubenswrapper[4852]: I0129 12:51:11.927019 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.034788 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ceph\") pod \"02fc8a61-1749-4d19-962f-daaa15a078f6\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.035046 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5kj2\" (UniqueName: \"kubernetes.io/projected/02fc8a61-1749-4d19-962f-daaa15a078f6-kube-api-access-n5kj2\") pod \"02fc8a61-1749-4d19-962f-daaa15a078f6\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.035230 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-inventory\") pod \"02fc8a61-1749-4d19-962f-daaa15a078f6\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.035300 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ssh-key-openstack-cell1\") pod \"02fc8a61-1749-4d19-962f-daaa15a078f6\" (UID: \"02fc8a61-1749-4d19-962f-daaa15a078f6\") " Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.049628 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ceph" (OuterVolumeSpecName: "ceph") pod "02fc8a61-1749-4d19-962f-daaa15a078f6" (UID: "02fc8a61-1749-4d19-962f-daaa15a078f6"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.053735 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02fc8a61-1749-4d19-962f-daaa15a078f6-kube-api-access-n5kj2" (OuterVolumeSpecName: "kube-api-access-n5kj2") pod "02fc8a61-1749-4d19-962f-daaa15a078f6" (UID: "02fc8a61-1749-4d19-962f-daaa15a078f6"). InnerVolumeSpecName "kube-api-access-n5kj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.066947 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "02fc8a61-1749-4d19-962f-daaa15a078f6" (UID: "02fc8a61-1749-4d19-962f-daaa15a078f6"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.071132 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-inventory" (OuterVolumeSpecName: "inventory") pod "02fc8a61-1749-4d19-962f-daaa15a078f6" (UID: "02fc8a61-1749-4d19-962f-daaa15a078f6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.137928 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.137963 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.137973 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5kj2\" (UniqueName: \"kubernetes.io/projected/02fc8a61-1749-4d19-962f-daaa15a078f6-kube-api-access-n5kj2\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.137984 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02fc8a61-1749-4d19-962f-daaa15a078f6-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.431735 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" event={"ID":"02fc8a61-1749-4d19-962f-daaa15a078f6","Type":"ContainerDied","Data":"5b3c5a3edcf095e98d28ff868466dd96151f7b096152f18dd893d8b4612b427e"} Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.431793 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-p8ffg" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.431793 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b3c5a3edcf095e98d28ff868466dd96151f7b096152f18dd893d8b4612b427e" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.550155 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-b9std"] Jan 29 12:51:12 crc kubenswrapper[4852]: E0129 12:51:12.550711 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02fc8a61-1749-4d19-962f-daaa15a078f6" containerName="reboot-os-openstack-openstack-cell1" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.550732 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="02fc8a61-1749-4d19-962f-daaa15a078f6" containerName="reboot-os-openstack-openstack-cell1" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.551021 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="02fc8a61-1749-4d19-962f-daaa15a078f6" containerName="reboot-os-openstack-openstack-cell1" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.551981 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.555488 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.556634 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.556929 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.557133 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.573015 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-b9std"] Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649071 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649123 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649161 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649208 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp7lw\" (UniqueName: \"kubernetes.io/projected/24859cd7-165a-4b80-affd-fea7857b2d93-kube-api-access-jp7lw\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649344 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649498 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649562 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ceph\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649653 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649807 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649882 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-inventory\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.649968 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ssh-key-openstack-cell1\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.650016 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752355 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752433 4852 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ceph\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752465 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752532 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752597 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-inventory\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752652 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ssh-key-openstack-cell1\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752687 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752749 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752772 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752811 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752861 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp7lw\" (UniqueName: \"kubernetes.io/projected/24859cd7-165a-4b80-affd-fea7857b2d93-kube-api-access-jp7lw\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.752882 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.762019 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.774378 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-inventory\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.775157 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.775949 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ssh-key-openstack-cell1\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.778414 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.780813 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.781909 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.783263 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp7lw\" (UniqueName: \"kubernetes.io/projected/24859cd7-165a-4b80-affd-fea7857b2d93-kube-api-access-jp7lw\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.783323 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.785498 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.786373 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.788650 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ceph\") pod \"install-certs-openstack-openstack-cell1-b9std\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:12 crc kubenswrapper[4852]: I0129 12:51:12.882109 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:13 crc kubenswrapper[4852]: I0129 12:51:13.555696 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-b9std"] Jan 29 12:51:14 crc kubenswrapper[4852]: I0129 12:51:14.454257 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-b9std" event={"ID":"24859cd7-165a-4b80-affd-fea7857b2d93","Type":"ContainerStarted","Data":"968982894a1dc536c5e772d664a3cf1caf67896090589aea1bf1d6ef4835e97a"} Jan 29 12:51:15 crc kubenswrapper[4852]: I0129 12:51:15.476273 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-b9std" event={"ID":"24859cd7-165a-4b80-affd-fea7857b2d93","Type":"ContainerStarted","Data":"8d6a0c5db55d83fee41c33cfe88cf0c05c040270633b3b01734fd0eb2cbcaf3a"} Jan 29 12:51:15 crc kubenswrapper[4852]: I0129 12:51:15.487128 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-b9std" podStartSLOduration=2.015374112 podStartE2EDuration="3.487107204s" podCreationTimestamp="2026-01-29 12:51:12 +0000 UTC" firstStartedPulling="2026-01-29 12:51:13.560214837 +0000 UTC m=+7770.777545961" lastFinishedPulling="2026-01-29 12:51:15.031947919 +0000 UTC m=+7772.249279053" observedRunningTime="2026-01-29 12:51:15.485187897 +0000 UTC m=+7772.702519031" watchObservedRunningTime="2026-01-29 12:51:15.487107204 +0000 UTC m=+7772.704438338" Jan 29 12:51:23 crc kubenswrapper[4852]: I0129 12:51:23.470208 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:51:23 crc kubenswrapper[4852]: E0129 12:51:23.471068 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:51:34 crc kubenswrapper[4852]: I0129 12:51:34.670453 4852 generic.go:334] "Generic (PLEG): container finished" podID="24859cd7-165a-4b80-affd-fea7857b2d93" containerID="8d6a0c5db55d83fee41c33cfe88cf0c05c040270633b3b01734fd0eb2cbcaf3a" exitCode=0 Jan 29 12:51:34 crc kubenswrapper[4852]: I0129 12:51:34.670530 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-b9std" event={"ID":"24859cd7-165a-4b80-affd-fea7857b2d93","Type":"ContainerDied","Data":"8d6a0c5db55d83fee41c33cfe88cf0c05c040270633b3b01734fd0eb2cbcaf3a"} Jan 29 12:51:35 crc kubenswrapper[4852]: I0129 12:51:35.464052 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:51:35 crc kubenswrapper[4852]: E0129 12:51:35.465187 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 
12:51:36.172250 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189048 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ovn-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189141 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-sriov-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189177 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-dhcp-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189224 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-bootstrap-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189291 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-telemetry-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189315 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jp7lw\" (UniqueName: \"kubernetes.io/projected/24859cd7-165a-4b80-affd-fea7857b2d93-kube-api-access-jp7lw\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189351 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ceph\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189422 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-metadata-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189441 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-nova-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc 
kubenswrapper[4852]: I0129 12:51:36.189535 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-libvirt-combined-ca-bundle\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189565 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ssh-key-openstack-cell1\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.189611 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-inventory\") pod \"24859cd7-165a-4b80-affd-fea7857b2d93\" (UID: \"24859cd7-165a-4b80-affd-fea7857b2d93\") " Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.199849 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.199879 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.199959 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.199979 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.200060 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.200345 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.201211 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.203906 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ceph" (OuterVolumeSpecName: "ceph") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.210966 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24859cd7-165a-4b80-affd-fea7857b2d93-kube-api-access-jp7lw" (OuterVolumeSpecName: "kube-api-access-jp7lw") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "kube-api-access-jp7lw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.215472 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.228201 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.240280 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-inventory" (OuterVolumeSpecName: "inventory") pod "24859cd7-165a-4b80-affd-fea7857b2d93" (UID: "24859cd7-165a-4b80-affd-fea7857b2d93"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292065 4852 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292115 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292130 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292143 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292156 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292169 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292180 4852 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292188 4852 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292199 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jp7lw\" (UniqueName: \"kubernetes.io/projected/24859cd7-165a-4b80-affd-fea7857b2d93-kube-api-access-jp7lw\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292207 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292215 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.292223 4852 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24859cd7-165a-4b80-affd-fea7857b2d93-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.694723 4852 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-b9std" event={"ID":"24859cd7-165a-4b80-affd-fea7857b2d93","Type":"ContainerDied","Data":"968982894a1dc536c5e772d664a3cf1caf67896090589aea1bf1d6ef4835e97a"} Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.695044 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="968982894a1dc536c5e772d664a3cf1caf67896090589aea1bf1d6ef4835e97a" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.694776 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-b9std" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.793642 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-z622l"] Jan 29 12:51:36 crc kubenswrapper[4852]: E0129 12:51:36.794149 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24859cd7-165a-4b80-affd-fea7857b2d93" containerName="install-certs-openstack-openstack-cell1" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.794167 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="24859cd7-165a-4b80-affd-fea7857b2d93" containerName="install-certs-openstack-openstack-cell1" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.794365 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="24859cd7-165a-4b80-affd-fea7857b2d93" containerName="install-certs-openstack-openstack-cell1" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.795138 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.803906 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.804242 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.804395 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.805432 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.807543 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ceph\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.807650 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48rjc\" (UniqueName: \"kubernetes.io/projected/f5a52527-628d-427d-8edc-59dc40c51386-kube-api-access-48rjc\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.807727 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-inventory\") pod 
\"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.807773 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ssh-key-openstack-cell1\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.809170 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-z622l"] Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.910032 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-inventory\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.910093 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ssh-key-openstack-cell1\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.910491 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ceph\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.910714 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48rjc\" (UniqueName: \"kubernetes.io/projected/f5a52527-628d-427d-8edc-59dc40c51386-kube-api-access-48rjc\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.914115 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ceph\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.914364 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-inventory\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.914707 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ssh-key-openstack-cell1\") pod 
\"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:36 crc kubenswrapper[4852]: I0129 12:51:36.928222 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48rjc\" (UniqueName: \"kubernetes.io/projected/f5a52527-628d-427d-8edc-59dc40c51386-kube-api-access-48rjc\") pod \"ceph-client-openstack-openstack-cell1-z622l\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:37 crc kubenswrapper[4852]: I0129 12:51:37.173246 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:37 crc kubenswrapper[4852]: I0129 12:51:37.917985 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-z622l"] Jan 29 12:51:38 crc kubenswrapper[4852]: I0129 12:51:38.726482 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" event={"ID":"f5a52527-628d-427d-8edc-59dc40c51386","Type":"ContainerStarted","Data":"fe45ce92431d133250d1d81aee318b8350eef1ac899ce24080ae73d0a16be918"} Jan 29 12:51:39 crc kubenswrapper[4852]: I0129 12:51:39.739673 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" event={"ID":"f5a52527-628d-427d-8edc-59dc40c51386","Type":"ContainerStarted","Data":"7b93b65b93edf40b5dbb7f1af8cdefff9ca7044e64f18c79656cc9c9aa8de51c"} Jan 29 12:51:40 crc kubenswrapper[4852]: I0129 12:51:40.779312 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" podStartSLOduration=3.3458905 podStartE2EDuration="4.779288149s" podCreationTimestamp="2026-01-29 12:51:36 +0000 UTC" firstStartedPulling="2026-01-29 12:51:37.922637208 +0000 UTC m=+7795.139968342" lastFinishedPulling="2026-01-29 12:51:39.356034867 +0000 UTC m=+7796.573365991" observedRunningTime="2026-01-29 12:51:40.768142248 +0000 UTC m=+7797.985473382" watchObservedRunningTime="2026-01-29 12:51:40.779288149 +0000 UTC m=+7797.996619283" Jan 29 12:51:44 crc kubenswrapper[4852]: I0129 12:51:44.794815 4852 generic.go:334] "Generic (PLEG): container finished" podID="f5a52527-628d-427d-8edc-59dc40c51386" containerID="7b93b65b93edf40b5dbb7f1af8cdefff9ca7044e64f18c79656cc9c9aa8de51c" exitCode=0 Jan 29 12:51:44 crc kubenswrapper[4852]: I0129 12:51:44.794905 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" event={"ID":"f5a52527-628d-427d-8edc-59dc40c51386","Type":"ContainerDied","Data":"7b93b65b93edf40b5dbb7f1af8cdefff9ca7044e64f18c79656cc9c9aa8de51c"} Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.296535 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.331145 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ssh-key-openstack-cell1\") pod \"f5a52527-628d-427d-8edc-59dc40c51386\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.331683 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48rjc\" (UniqueName: \"kubernetes.io/projected/f5a52527-628d-427d-8edc-59dc40c51386-kube-api-access-48rjc\") pod \"f5a52527-628d-427d-8edc-59dc40c51386\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.332502 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-inventory\") pod \"f5a52527-628d-427d-8edc-59dc40c51386\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.332757 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ceph\") pod \"f5a52527-628d-427d-8edc-59dc40c51386\" (UID: \"f5a52527-628d-427d-8edc-59dc40c51386\") " Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.337430 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ceph" (OuterVolumeSpecName: "ceph") pod "f5a52527-628d-427d-8edc-59dc40c51386" (UID: "f5a52527-628d-427d-8edc-59dc40c51386"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.338142 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a52527-628d-427d-8edc-59dc40c51386-kube-api-access-48rjc" (OuterVolumeSpecName: "kube-api-access-48rjc") pod "f5a52527-628d-427d-8edc-59dc40c51386" (UID: "f5a52527-628d-427d-8edc-59dc40c51386"). InnerVolumeSpecName "kube-api-access-48rjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.358543 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "f5a52527-628d-427d-8edc-59dc40c51386" (UID: "f5a52527-628d-427d-8edc-59dc40c51386"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.379299 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-inventory" (OuterVolumeSpecName: "inventory") pod "f5a52527-628d-427d-8edc-59dc40c51386" (UID: "f5a52527-628d-427d-8edc-59dc40c51386"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.436165 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48rjc\" (UniqueName: \"kubernetes.io/projected/f5a52527-628d-427d-8edc-59dc40c51386-kube-api-access-48rjc\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.436204 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.436213 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.436224 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f5a52527-628d-427d-8edc-59dc40c51386-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.819100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" event={"ID":"f5a52527-628d-427d-8edc-59dc40c51386","Type":"ContainerDied","Data":"fe45ce92431d133250d1d81aee318b8350eef1ac899ce24080ae73d0a16be918"} Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.819421 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe45ce92431d133250d1d81aee318b8350eef1ac899ce24080ae73d0a16be918" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.819384 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-z622l" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.898717 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-khsth"] Jan 29 12:51:46 crc kubenswrapper[4852]: E0129 12:51:46.899212 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a52527-628d-427d-8edc-59dc40c51386" containerName="ceph-client-openstack-openstack-cell1" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.899232 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a52527-628d-427d-8edc-59dc40c51386" containerName="ceph-client-openstack-openstack-cell1" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.899456 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a52527-628d-427d-8edc-59dc40c51386" containerName="ceph-client-openstack-openstack-cell1" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.900196 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.905724 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.905909 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.906076 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.906191 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.906289 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.912633 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-khsth"] Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.954402 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.954537 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ceph\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.954566 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.954667 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6hlk\" (UniqueName: \"kubernetes.io/projected/19e6cd4d-7066-4b75-9b90-79226950ef2f-kube-api-access-m6hlk\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.954709 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-inventory\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:46 crc kubenswrapper[4852]: I0129 12:51:46.954776 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ssh-key-openstack-cell1\") pod 
\"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.056729 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6hlk\" (UniqueName: \"kubernetes.io/projected/19e6cd4d-7066-4b75-9b90-79226950ef2f-kube-api-access-m6hlk\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.056817 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-inventory\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.056903 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ssh-key-openstack-cell1\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.057021 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.057109 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ceph\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.057134 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.058915 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.062404 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.062735 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" 
(UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ceph\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.063055 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ssh-key-openstack-cell1\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.065886 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-inventory\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.074819 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6hlk\" (UniqueName: \"kubernetes.io/projected/19e6cd4d-7066-4b75-9b90-79226950ef2f-kube-api-access-m6hlk\") pod \"ovn-openstack-openstack-cell1-khsth\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.218401 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.754647 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-khsth"] Jan 29 12:51:47 crc kubenswrapper[4852]: W0129 12:51:47.756197 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19e6cd4d_7066_4b75_9b90_79226950ef2f.slice/crio-367ef47aa5518a15ee507b2407182f08d353eed1dae23bd330d76ddaec26abc6 WatchSource:0}: Error finding container 367ef47aa5518a15ee507b2407182f08d353eed1dae23bd330d76ddaec26abc6: Status 404 returned error can't find the container with id 367ef47aa5518a15ee507b2407182f08d353eed1dae23bd330d76ddaec26abc6 Jan 29 12:51:47 crc kubenswrapper[4852]: I0129 12:51:47.831629 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-khsth" event={"ID":"19e6cd4d-7066-4b75-9b90-79226950ef2f","Type":"ContainerStarted","Data":"367ef47aa5518a15ee507b2407182f08d353eed1dae23bd330d76ddaec26abc6"} Jan 29 12:51:48 crc kubenswrapper[4852]: I0129 12:51:48.843017 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-khsth" event={"ID":"19e6cd4d-7066-4b75-9b90-79226950ef2f","Type":"ContainerStarted","Data":"e93ddcc6155b9eab0512f20bc44e862a5fb5da469f75c892511011f4d3395296"} Jan 29 12:51:48 crc kubenswrapper[4852]: I0129 12:51:48.866755 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-khsth" podStartSLOduration=2.38783569 podStartE2EDuration="2.866730002s" podCreationTimestamp="2026-01-29 12:51:46 +0000 UTC" firstStartedPulling="2026-01-29 12:51:47.761468295 +0000 UTC m=+7804.978799429" lastFinishedPulling="2026-01-29 12:51:48.240362587 +0000 UTC m=+7805.457693741" observedRunningTime="2026-01-29 12:51:48.860294074 +0000 UTC m=+7806.077625228" 
watchObservedRunningTime="2026-01-29 12:51:48.866730002 +0000 UTC m=+7806.084061156" Jan 29 12:51:49 crc kubenswrapper[4852]: I0129 12:51:49.463099 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:51:49 crc kubenswrapper[4852]: E0129 12:51:49.463649 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:52:04 crc kubenswrapper[4852]: I0129 12:52:04.463801 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:52:04 crc kubenswrapper[4852]: E0129 12:52:04.464677 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:52:19 crc kubenswrapper[4852]: I0129 12:52:19.464357 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:52:19 crc kubenswrapper[4852]: E0129 12:52:19.465663 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:52:32 crc kubenswrapper[4852]: I0129 12:52:32.464259 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:52:32 crc kubenswrapper[4852]: E0129 12:52:32.465184 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:52:44 crc kubenswrapper[4852]: I0129 12:52:44.464364 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:52:44 crc kubenswrapper[4852]: E0129 12:52:44.465634 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:52:50 crc kubenswrapper[4852]: I0129 12:52:50.509333 4852 generic.go:334] "Generic (PLEG): 
container finished" podID="19e6cd4d-7066-4b75-9b90-79226950ef2f" containerID="e93ddcc6155b9eab0512f20bc44e862a5fb5da469f75c892511011f4d3395296" exitCode=0 Jan 29 12:52:50 crc kubenswrapper[4852]: I0129 12:52:50.509968 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-khsth" event={"ID":"19e6cd4d-7066-4b75-9b90-79226950ef2f","Type":"ContainerDied","Data":"e93ddcc6155b9eab0512f20bc44e862a5fb5da469f75c892511011f4d3395296"} Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.072451 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.205692 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovn-combined-ca-bundle\") pod \"19e6cd4d-7066-4b75-9b90-79226950ef2f\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.205736 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-inventory\") pod \"19e6cd4d-7066-4b75-9b90-79226950ef2f\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.205761 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovncontroller-config-0\") pod \"19e6cd4d-7066-4b75-9b90-79226950ef2f\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.205855 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6hlk\" (UniqueName: \"kubernetes.io/projected/19e6cd4d-7066-4b75-9b90-79226950ef2f-kube-api-access-m6hlk\") pod \"19e6cd4d-7066-4b75-9b90-79226950ef2f\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.206020 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ssh-key-openstack-cell1\") pod \"19e6cd4d-7066-4b75-9b90-79226950ef2f\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.206092 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ceph\") pod \"19e6cd4d-7066-4b75-9b90-79226950ef2f\" (UID: \"19e6cd4d-7066-4b75-9b90-79226950ef2f\") " Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.212354 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ceph" (OuterVolumeSpecName: "ceph") pod "19e6cd4d-7066-4b75-9b90-79226950ef2f" (UID: "19e6cd4d-7066-4b75-9b90-79226950ef2f"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.212492 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "19e6cd4d-7066-4b75-9b90-79226950ef2f" (UID: "19e6cd4d-7066-4b75-9b90-79226950ef2f"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.216502 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19e6cd4d-7066-4b75-9b90-79226950ef2f-kube-api-access-m6hlk" (OuterVolumeSpecName: "kube-api-access-m6hlk") pod "19e6cd4d-7066-4b75-9b90-79226950ef2f" (UID: "19e6cd4d-7066-4b75-9b90-79226950ef2f"). InnerVolumeSpecName "kube-api-access-m6hlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.241504 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "19e6cd4d-7066-4b75-9b90-79226950ef2f" (UID: "19e6cd4d-7066-4b75-9b90-79226950ef2f"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.242283 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-inventory" (OuterVolumeSpecName: "inventory") pod "19e6cd4d-7066-4b75-9b90-79226950ef2f" (UID: "19e6cd4d-7066-4b75-9b90-79226950ef2f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.260946 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "19e6cd4d-7066-4b75-9b90-79226950ef2f" (UID: "19e6cd4d-7066-4b75-9b90-79226950ef2f"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.309316 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.309354 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.309364 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.309374 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19e6cd4d-7066-4b75-9b90-79226950ef2f-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.309384 4852 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19e6cd4d-7066-4b75-9b90-79226950ef2f-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.309396 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6hlk\" (UniqueName: \"kubernetes.io/projected/19e6cd4d-7066-4b75-9b90-79226950ef2f-kube-api-access-m6hlk\") on node \"crc\" DevicePath \"\"" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.529838 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-khsth" event={"ID":"19e6cd4d-7066-4b75-9b90-79226950ef2f","Type":"ContainerDied","Data":"367ef47aa5518a15ee507b2407182f08d353eed1dae23bd330d76ddaec26abc6"} Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.530115 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="367ef47aa5518a15ee507b2407182f08d353eed1dae23bd330d76ddaec26abc6" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.529947 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-khsth" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.643689 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-kd7tl"] Jan 29 12:52:52 crc kubenswrapper[4852]: E0129 12:52:52.644255 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19e6cd4d-7066-4b75-9b90-79226950ef2f" containerName="ovn-openstack-openstack-cell1" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.644275 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="19e6cd4d-7066-4b75-9b90-79226950ef2f" containerName="ovn-openstack-openstack-cell1" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.644545 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="19e6cd4d-7066-4b75-9b90-79226950ef2f" containerName="ovn-openstack-openstack-cell1" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.645447 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.653512 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-kd7tl"] Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.678048 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.678981 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.679070 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.680070 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.682186 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.698114 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.718995 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.719105 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7npkv\" (UniqueName: \"kubernetes.io/projected/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-kube-api-access-7npkv\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.719204 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.719249 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.719289 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ssh-key-openstack-cell1\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" 
(UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.719312 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.719351 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821361 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821429 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821472 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ssh-key-openstack-cell1\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821497 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821541 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821663 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-inventory\") pod 
\"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.821734 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7npkv\" (UniqueName: \"kubernetes.io/projected/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-kube-api-access-7npkv\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.827513 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ssh-key-openstack-cell1\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.834046 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.835150 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.835351 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.842542 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.852342 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.852776 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7npkv\" (UniqueName: \"kubernetes.io/projected/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-kube-api-access-7npkv\") pod 
\"neutron-metadata-openstack-openstack-cell1-kd7tl\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:52 crc kubenswrapper[4852]: I0129 12:52:52.992899 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:52:53 crc kubenswrapper[4852]: I0129 12:52:53.586449 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-kd7tl"] Jan 29 12:52:54 crc kubenswrapper[4852]: I0129 12:52:54.556915 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" event={"ID":"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1","Type":"ContainerStarted","Data":"c6afba259be3d19917e0a20f91557ebb19479e54284211a8afca61f162bcb77b"} Jan 29 12:52:54 crc kubenswrapper[4852]: I0129 12:52:54.557308 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" event={"ID":"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1","Type":"ContainerStarted","Data":"250c79045997f1dc0f051f2dfce2bcbd39c63aec0cedae30017a321fac9e06d6"} Jan 29 12:52:54 crc kubenswrapper[4852]: I0129 12:52:54.593291 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" podStartSLOduration=1.99404126 podStartE2EDuration="2.593258743s" podCreationTimestamp="2026-01-29 12:52:52 +0000 UTC" firstStartedPulling="2026-01-29 12:52:53.595009672 +0000 UTC m=+7870.812340806" lastFinishedPulling="2026-01-29 12:52:54.194227155 +0000 UTC m=+7871.411558289" observedRunningTime="2026-01-29 12:52:54.578883752 +0000 UTC m=+7871.796214886" watchObservedRunningTime="2026-01-29 12:52:54.593258743 +0000 UTC m=+7871.810589897" Jan 29 12:52:58 crc kubenswrapper[4852]: I0129 12:52:58.463083 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:52:58 crc kubenswrapper[4852]: E0129 12:52:58.463881 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:53:04 crc kubenswrapper[4852]: I0129 12:53:04.994194 4852 scope.go:117] "RemoveContainer" containerID="d1eee08c30b31cf8efcd04a1f47a9c9b65ddb454d53d315d1f0ac6fcc1622942" Jan 29 12:53:05 crc kubenswrapper[4852]: I0129 12:53:05.020818 4852 scope.go:117] "RemoveContainer" containerID="8c287d6e041d8a71828e9c3392ddf396e54990c0195dfb1cac66b072aac37634" Jan 29 12:53:05 crc kubenswrapper[4852]: I0129 12:53:05.073012 4852 scope.go:117] "RemoveContainer" containerID="aa7df4943d91236922acc454d5f7a01f07a9e968e8d96a9e0a66840141e55b10" Jan 29 12:53:09 crc kubenswrapper[4852]: I0129 12:53:09.463850 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:53:09 crc kubenswrapper[4852]: E0129 12:53:09.464486 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:53:24 crc kubenswrapper[4852]: I0129 12:53:24.465303 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:53:24 crc kubenswrapper[4852]: E0129 12:53:24.480146 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:53:38 crc kubenswrapper[4852]: I0129 12:53:38.463751 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:53:38 crc kubenswrapper[4852]: E0129 12:53:38.464453 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:53:43 crc kubenswrapper[4852]: I0129 12:53:43.039222 4852 generic.go:334] "Generic (PLEG): container finished" podID="98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" containerID="c6afba259be3d19917e0a20f91557ebb19479e54284211a8afca61f162bcb77b" exitCode=0 Jan 29 12:53:43 crc kubenswrapper[4852]: I0129 12:53:43.039311 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" event={"ID":"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1","Type":"ContainerDied","Data":"c6afba259be3d19917e0a20f91557ebb19479e54284211a8afca61f162bcb77b"} Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.482550 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595290 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ssh-key-openstack-cell1\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595348 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ceph\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595461 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-nova-metadata-neutron-config-0\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595510 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7npkv\" (UniqueName: \"kubernetes.io/projected/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-kube-api-access-7npkv\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595557 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-inventory\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595636 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-metadata-combined-ca-bundle\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.595718 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\" (UID: \"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1\") " Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.601159 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ceph" (OuterVolumeSpecName: "ceph") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.612914 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-kube-api-access-7npkv" (OuterVolumeSpecName: "kube-api-access-7npkv") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "kube-api-access-7npkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.613802 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.622878 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.636164 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.640815 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.641172 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-inventory" (OuterVolumeSpecName: "inventory") pod "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" (UID: "98bc1451-81f8-41ed-8e6e-c56eca9ca0f1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698158 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698194 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698205 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698214 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698222 4852 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698231 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7npkv\" (UniqueName: \"kubernetes.io/projected/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-kube-api-access-7npkv\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:44 crc kubenswrapper[4852]: I0129 12:53:44.698242 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/98bc1451-81f8-41ed-8e6e-c56eca9ca0f1-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.062561 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" event={"ID":"98bc1451-81f8-41ed-8e6e-c56eca9ca0f1","Type":"ContainerDied","Data":"250c79045997f1dc0f051f2dfce2bcbd39c63aec0cedae30017a321fac9e06d6"} Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.062621 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="250c79045997f1dc0f051f2dfce2bcbd39c63aec0cedae30017a321fac9e06d6" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.062652 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-kd7tl" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.165261 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-gqh6l"] Jan 29 12:53:45 crc kubenswrapper[4852]: E0129 12:53:45.166111 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" containerName="neutron-metadata-openstack-openstack-cell1" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.166137 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" containerName="neutron-metadata-openstack-openstack-cell1" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.166457 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="98bc1451-81f8-41ed-8e6e-c56eca9ca0f1" containerName="neutron-metadata-openstack-openstack-cell1" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.167703 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.170396 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.170862 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.170872 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.174957 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.177322 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.180709 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-gqh6l"] Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.311185 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.311247 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ceph\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.311296 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.311340 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ssh-key-openstack-cell1\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.311378 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwnh8\" (UniqueName: \"kubernetes.io/projected/bd812255-d120-4225-98da-716373cacd08-kube-api-access-xwnh8\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.311822 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-inventory\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.413315 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.413370 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ssh-key-openstack-cell1\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.413393 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwnh8\" (UniqueName: \"kubernetes.io/projected/bd812255-d120-4225-98da-716373cacd08-kube-api-access-xwnh8\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.413513 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-inventory\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.413560 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.413596 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ceph\") pod 
\"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.418695 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ceph\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.419457 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.419533 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ssh-key-openstack-cell1\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.419009 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.421774 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-inventory\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.433898 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwnh8\" (UniqueName: \"kubernetes.io/projected/bd812255-d120-4225-98da-716373cacd08-kube-api-access-xwnh8\") pod \"libvirt-openstack-openstack-cell1-gqh6l\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:45 crc kubenswrapper[4852]: I0129 12:53:45.504415 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:53:46 crc kubenswrapper[4852]: I0129 12:53:46.087343 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-gqh6l"] Jan 29 12:53:46 crc kubenswrapper[4852]: I0129 12:53:46.094794 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:53:47 crc kubenswrapper[4852]: I0129 12:53:47.083998 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" event={"ID":"bd812255-d120-4225-98da-716373cacd08","Type":"ContainerStarted","Data":"37c378aef0c4e9183f3f664b695b81ee65def9e0cdb848f6445021e2d2ce0a05"} Jan 29 12:53:47 crc kubenswrapper[4852]: I0129 12:53:47.084335 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" event={"ID":"bd812255-d120-4225-98da-716373cacd08","Type":"ContainerStarted","Data":"b02ddd53965fdef91352648510aab629ff643cae99ff87c42a7612abd126d265"} Jan 29 12:53:50 crc kubenswrapper[4852]: I0129 12:53:50.463727 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:53:50 crc kubenswrapper[4852]: E0129 12:53:50.465717 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:54:03 crc kubenswrapper[4852]: I0129 12:54:03.474455 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:54:03 crc kubenswrapper[4852]: E0129 12:54:03.475975 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:54:18 crc kubenswrapper[4852]: I0129 12:54:18.464346 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:54:18 crc kubenswrapper[4852]: E0129 12:54:18.465116 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:54:32 crc kubenswrapper[4852]: I0129 12:54:32.463681 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:54:32 crc kubenswrapper[4852]: E0129 12:54:32.464531 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:54:43 crc kubenswrapper[4852]: I0129 12:54:43.471892 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:54:43 crc kubenswrapper[4852]: E0129 12:54:43.473155 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:54:54 crc kubenswrapper[4852]: I0129 12:54:54.463415 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:54:54 crc kubenswrapper[4852]: E0129 12:54:54.464457 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:55:08 crc kubenswrapper[4852]: I0129 12:55:08.463611 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:55:08 crc kubenswrapper[4852]: E0129 12:55:08.464349 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:55:20 crc kubenswrapper[4852]: I0129 12:55:20.463736 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:55:20 crc kubenswrapper[4852]: E0129 12:55:20.464364 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 12:55:34 crc kubenswrapper[4852]: I0129 12:55:34.463707 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 12:55:35 crc kubenswrapper[4852]: I0129 12:55:35.242162 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"429f7a973e23a9b6211e73f4246866dd1a967f8348dcf23a797b492e1c452f76"} Jan 29 12:55:35 crc kubenswrapper[4852]: I0129 12:55:35.277399 4852 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" podStartSLOduration=109.784014854 podStartE2EDuration="1m50.277377519s" podCreationTimestamp="2026-01-29 12:53:45 +0000 UTC" firstStartedPulling="2026-01-29 12:53:46.094539255 +0000 UTC m=+7923.311870389" lastFinishedPulling="2026-01-29 12:53:46.58790191 +0000 UTC m=+7923.805233054" observedRunningTime="2026-01-29 12:53:47.103859846 +0000 UTC m=+7924.321190980" watchObservedRunningTime="2026-01-29 12:55:35.277377519 +0000 UTC m=+8032.494708663" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.209831 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-699zf"] Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.214702 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.246009 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-699zf"] Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.327213 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-catalog-content\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.327366 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwsfr\" (UniqueName: \"kubernetes.io/projected/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-kube-api-access-qwsfr\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.327510 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-utilities\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.429544 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwsfr\" (UniqueName: \"kubernetes.io/projected/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-kube-api-access-qwsfr\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.429658 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-utilities\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.429801 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-catalog-content\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.430309 4852 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-utilities\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.430322 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-catalog-content\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.448925 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwsfr\" (UniqueName: \"kubernetes.io/projected/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-kube-api-access-qwsfr\") pod \"redhat-marketplace-699zf\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:55 crc kubenswrapper[4852]: I0129 12:56:55.562305 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:56:56 crc kubenswrapper[4852]: I0129 12:56:56.053830 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-699zf"] Jan 29 12:56:56 crc kubenswrapper[4852]: I0129 12:56:56.236331 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerStarted","Data":"77b0ad0a381a5b358f087a54ba116d83208d21a4fc2c94dc7cc035f0d8ab36c9"} Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.251427 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerID="aeb41be860402265afd01444d42c4ec73b13735c0da4e256fe8d81d1261b2396" exitCode=0 Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.251533 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerDied","Data":"aeb41be860402265afd01444d42c4ec73b13735c0da4e256fe8d81d1261b2396"} Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.406693 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8hhfb"] Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.409518 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.423497 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8hhfb"] Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.583886 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-catalog-content\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.584472 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtzwg\" (UniqueName: \"kubernetes.io/projected/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-kube-api-access-rtzwg\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.584729 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-utilities\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.593396 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p8nnc"] Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.595864 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.611369 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p8nnc"] Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.687263 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlz9h\" (UniqueName: \"kubernetes.io/projected/92a0e805-7a17-41bb-8d08-bf42f4a115ac-kube-api-access-vlz9h\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.687304 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-catalog-content\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.687448 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-catalog-content\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.687559 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtzwg\" (UniqueName: \"kubernetes.io/projected/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-kube-api-access-rtzwg\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.687877 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-catalog-content\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.687990 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-utilities\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.688081 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-utilities\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.688498 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-utilities\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.708057 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rtzwg\" (UniqueName: \"kubernetes.io/projected/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-kube-api-access-rtzwg\") pod \"community-operators-8hhfb\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.740217 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.792422 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlz9h\" (UniqueName: \"kubernetes.io/projected/92a0e805-7a17-41bb-8d08-bf42f4a115ac-kube-api-access-vlz9h\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.792488 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-catalog-content\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.792689 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-utilities\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.793211 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-utilities\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.806600 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-catalog-content\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.818381 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlz9h\" (UniqueName: \"kubernetes.io/projected/92a0e805-7a17-41bb-8d08-bf42f4a115ac-kube-api-access-vlz9h\") pod \"redhat-operators-p8nnc\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:57 crc kubenswrapper[4852]: I0129 12:56:57.923475 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:56:58 crc kubenswrapper[4852]: I0129 12:56:58.265331 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerStarted","Data":"8b93f4670e0f3274957791bd1af1d0c496ab9586168f3bbc6d3a1f5b3555dac5"} Jan 29 12:56:58 crc kubenswrapper[4852]: W0129 12:56:58.372112 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49e8e5f1_7c11_41c1_89b1_4a3fb330199b.slice/crio-7bea46bda60dd840d41b1bfcce32de52c2db318c6293e69e837ce55a58725b50 WatchSource:0}: Error finding container 7bea46bda60dd840d41b1bfcce32de52c2db318c6293e69e837ce55a58725b50: Status 404 returned error can't find the container with id 7bea46bda60dd840d41b1bfcce32de52c2db318c6293e69e837ce55a58725b50 Jan 29 12:56:58 crc kubenswrapper[4852]: I0129 12:56:58.376262 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8hhfb"] Jan 29 12:56:58 crc kubenswrapper[4852]: I0129 12:56:58.583317 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p8nnc"] Jan 29 12:56:58 crc kubenswrapper[4852]: W0129 12:56:58.620370 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92a0e805_7a17_41bb_8d08_bf42f4a115ac.slice/crio-3d943b9e15ba50203679b1ad967945fe7053125ad13ac8e4201ffb9d5543db60 WatchSource:0}: Error finding container 3d943b9e15ba50203679b1ad967945fe7053125ad13ac8e4201ffb9d5543db60: Status 404 returned error can't find the container with id 3d943b9e15ba50203679b1ad967945fe7053125ad13ac8e4201ffb9d5543db60 Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.287114 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerID="8b93f4670e0f3274957791bd1af1d0c496ab9586168f3bbc6d3a1f5b3555dac5" exitCode=0 Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.287243 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerDied","Data":"8b93f4670e0f3274957791bd1af1d0c496ab9586168f3bbc6d3a1f5b3555dac5"} Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.295945 4852 generic.go:334] "Generic (PLEG): container finished" podID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerID="54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720" exitCode=0 Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.296004 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerDied","Data":"54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720"} Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.296070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerStarted","Data":"3d943b9e15ba50203679b1ad967945fe7053125ad13ac8e4201ffb9d5543db60"} Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.299288 4852 generic.go:334] "Generic (PLEG): container finished" podID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerID="12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58" exitCode=0 Jan 29 
12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.299332 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerDied","Data":"12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58"} Jan 29 12:56:59 crc kubenswrapper[4852]: I0129 12:56:59.299363 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerStarted","Data":"7bea46bda60dd840d41b1bfcce32de52c2db318c6293e69e837ce55a58725b50"} Jan 29 12:57:00 crc kubenswrapper[4852]: I0129 12:57:00.313070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerStarted","Data":"9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049"} Jan 29 12:57:00 crc kubenswrapper[4852]: I0129 12:57:00.317897 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerStarted","Data":"4c5e3a79d0b500faad764ba56c82ff82ce29553b2a6a6fb2296309be440b4d97"} Jan 29 12:57:00 crc kubenswrapper[4852]: I0129 12:57:00.321570 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerStarted","Data":"3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b"} Jan 29 12:57:00 crc kubenswrapper[4852]: I0129 12:57:00.371290 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-699zf" podStartSLOduration=2.88415583 podStartE2EDuration="5.371268251s" podCreationTimestamp="2026-01-29 12:56:55 +0000 UTC" firstStartedPulling="2026-01-29 12:56:57.255745725 +0000 UTC m=+8114.473076859" lastFinishedPulling="2026-01-29 12:56:59.742858146 +0000 UTC m=+8116.960189280" observedRunningTime="2026-01-29 12:57:00.365937771 +0000 UTC m=+8117.583268915" watchObservedRunningTime="2026-01-29 12:57:00.371268251 +0000 UTC m=+8117.588599395" Jan 29 12:57:03 crc kubenswrapper[4852]: I0129 12:57:03.354712 4852 generic.go:334] "Generic (PLEG): container finished" podID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerID="9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049" exitCode=0 Jan 29 12:57:03 crc kubenswrapper[4852]: I0129 12:57:03.355237 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerDied","Data":"9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049"} Jan 29 12:57:04 crc kubenswrapper[4852]: I0129 12:57:04.402263 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerStarted","Data":"b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d"} Jan 29 12:57:04 crc kubenswrapper[4852]: I0129 12:57:04.427790 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8hhfb" podStartSLOduration=2.945112101 podStartE2EDuration="7.427766492s" podCreationTimestamp="2026-01-29 12:56:57 +0000 UTC" firstStartedPulling="2026-01-29 12:56:59.301106227 +0000 UTC m=+8116.518437361" 
lastFinishedPulling="2026-01-29 12:57:03.783760618 +0000 UTC m=+8121.001091752" observedRunningTime="2026-01-29 12:57:04.425987369 +0000 UTC m=+8121.643318493" watchObservedRunningTime="2026-01-29 12:57:04.427766492 +0000 UTC m=+8121.645097626" Jan 29 12:57:05 crc kubenswrapper[4852]: I0129 12:57:05.563599 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:57:05 crc kubenswrapper[4852]: I0129 12:57:05.563648 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:57:05 crc kubenswrapper[4852]: I0129 12:57:05.629086 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:57:06 crc kubenswrapper[4852]: I0129 12:57:06.500870 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:57:07 crc kubenswrapper[4852]: I0129 12:57:07.448239 4852 generic.go:334] "Generic (PLEG): container finished" podID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerID="3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b" exitCode=0 Jan 29 12:57:07 crc kubenswrapper[4852]: I0129 12:57:07.448292 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerDied","Data":"3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b"} Jan 29 12:57:07 crc kubenswrapper[4852]: I0129 12:57:07.741551 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:57:07 crc kubenswrapper[4852]: I0129 12:57:07.741627 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:57:07 crc kubenswrapper[4852]: I0129 12:57:07.981533 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-699zf"] Jan 29 12:57:08 crc kubenswrapper[4852]: I0129 12:57:08.458848 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerStarted","Data":"f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804"} Jan 29 12:57:08 crc kubenswrapper[4852]: I0129 12:57:08.459075 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-699zf" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="registry-server" containerID="cri-o://4c5e3a79d0b500faad764ba56c82ff82ce29553b2a6a6fb2296309be440b4d97" gracePeriod=2 Jan 29 12:57:08 crc kubenswrapper[4852]: I0129 12:57:08.485055 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p8nnc" podStartSLOduration=2.913251176 podStartE2EDuration="11.485031173s" podCreationTimestamp="2026-01-29 12:56:57 +0000 UTC" firstStartedPulling="2026-01-29 12:56:59.298443893 +0000 UTC m=+8116.515775027" lastFinishedPulling="2026-01-29 12:57:07.87022389 +0000 UTC m=+8125.087555024" observedRunningTime="2026-01-29 12:57:08.478270888 +0000 UTC m=+8125.695602022" watchObservedRunningTime="2026-01-29 12:57:08.485031173 +0000 UTC m=+8125.702362307" Jan 29 12:57:08 crc kubenswrapper[4852]: I0129 12:57:08.791546 4852 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/community-operators-8hhfb" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="registry-server" probeResult="failure" output=< Jan 29 12:57:08 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 12:57:08 crc kubenswrapper[4852]: > Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.471547 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerID="4c5e3a79d0b500faad764ba56c82ff82ce29553b2a6a6fb2296309be440b4d97" exitCode=0 Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.478274 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerDied","Data":"4c5e3a79d0b500faad764ba56c82ff82ce29553b2a6a6fb2296309be440b4d97"} Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.478489 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-699zf" event={"ID":"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b","Type":"ContainerDied","Data":"77b0ad0a381a5b358f087a54ba116d83208d21a4fc2c94dc7cc035f0d8ab36c9"} Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.478553 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77b0ad0a381a5b358f087a54ba116d83208d21a4fc2c94dc7cc035f0d8ab36c9" Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.548798 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.696120 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-catalog-content\") pod \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.696177 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-utilities\") pod \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.696513 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwsfr\" (UniqueName: \"kubernetes.io/projected/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-kube-api-access-qwsfr\") pod \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\" (UID: \"1b94185d-c5da-4bb7-8fad-75d4b5cbef9b\") " Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.696893 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-utilities" (OuterVolumeSpecName: "utilities") pod "1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" (UID: "1b94185d-c5da-4bb7-8fad-75d4b5cbef9b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.698102 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.704454 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-kube-api-access-qwsfr" (OuterVolumeSpecName: "kube-api-access-qwsfr") pod "1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" (UID: "1b94185d-c5da-4bb7-8fad-75d4b5cbef9b"). InnerVolumeSpecName "kube-api-access-qwsfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:57:09 crc kubenswrapper[4852]: I0129 12:57:09.715531 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" (UID: "1b94185d-c5da-4bb7-8fad-75d4b5cbef9b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:57:10 crc kubenswrapper[4852]: I0129 12:57:10.071248 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwsfr\" (UniqueName: \"kubernetes.io/projected/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-kube-api-access-qwsfr\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:10 crc kubenswrapper[4852]: I0129 12:57:10.071289 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:10 crc kubenswrapper[4852]: I0129 12:57:10.482609 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-699zf" Jan 29 12:57:10 crc kubenswrapper[4852]: I0129 12:57:10.530909 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-699zf"] Jan 29 12:57:10 crc kubenswrapper[4852]: I0129 12:57:10.543327 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-699zf"] Jan 29 12:57:11 crc kubenswrapper[4852]: I0129 12:57:11.475087 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" path="/var/lib/kubelet/pods/1b94185d-c5da-4bb7-8fad-75d4b5cbef9b/volumes" Jan 29 12:57:17 crc kubenswrapper[4852]: I0129 12:57:17.794874 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:57:17 crc kubenswrapper[4852]: I0129 12:57:17.870347 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:57:17 crc kubenswrapper[4852]: I0129 12:57:17.924509 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:57:17 crc kubenswrapper[4852]: I0129 12:57:17.924555 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:57:17 crc kubenswrapper[4852]: I0129 12:57:17.972556 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:57:18 crc kubenswrapper[4852]: I0129 12:57:18.036804 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8hhfb"] Jan 29 12:57:18 crc kubenswrapper[4852]: I0129 12:57:18.624999 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:57:19 crc kubenswrapper[4852]: I0129 12:57:19.571561 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8hhfb" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="registry-server" containerID="cri-o://b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d" gracePeriod=2 Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.158454 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.236371 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p8nnc"] Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.335473 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-utilities\") pod \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.335544 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-catalog-content\") pod \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.335616 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtzwg\" (UniqueName: \"kubernetes.io/projected/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-kube-api-access-rtzwg\") pod \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\" (UID: \"49e8e5f1-7c11-41c1-89b1-4a3fb330199b\") " Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.336902 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-utilities" (OuterVolumeSpecName: "utilities") pod "49e8e5f1-7c11-41c1-89b1-4a3fb330199b" (UID: "49e8e5f1-7c11-41c1-89b1-4a3fb330199b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.350348 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-kube-api-access-rtzwg" (OuterVolumeSpecName: "kube-api-access-rtzwg") pod "49e8e5f1-7c11-41c1-89b1-4a3fb330199b" (UID: "49e8e5f1-7c11-41c1-89b1-4a3fb330199b"). InnerVolumeSpecName "kube-api-access-rtzwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.415204 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "49e8e5f1-7c11-41c1-89b1-4a3fb330199b" (UID: "49e8e5f1-7c11-41c1-89b1-4a3fb330199b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.439083 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.439128 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.439143 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtzwg\" (UniqueName: \"kubernetes.io/projected/49e8e5f1-7c11-41c1-89b1-4a3fb330199b-kube-api-access-rtzwg\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.587244 4852 generic.go:334] "Generic (PLEG): container finished" podID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerID="b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d" exitCode=0 Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.587371 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8hhfb" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.587374 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerDied","Data":"b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d"} Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.587525 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhfb" event={"ID":"49e8e5f1-7c11-41c1-89b1-4a3fb330199b","Type":"ContainerDied","Data":"7bea46bda60dd840d41b1bfcce32de52c2db318c6293e69e837ce55a58725b50"} Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.587565 4852 scope.go:117] "RemoveContainer" containerID="b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.587863 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p8nnc" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="registry-server" containerID="cri-o://f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804" gracePeriod=2 Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.621721 4852 scope.go:117] "RemoveContainer" containerID="9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.642417 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8hhfb"] Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.657242 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8hhfb"] Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.669557 4852 scope.go:117] "RemoveContainer" containerID="12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.898610 4852 scope.go:117] "RemoveContainer" containerID="b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d" Jan 29 12:57:20 crc kubenswrapper[4852]: E0129 12:57:20.899084 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc 
= could not find container \"b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d\": container with ID starting with b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d not found: ID does not exist" containerID="b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.899132 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d"} err="failed to get container status \"b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d\": rpc error: code = NotFound desc = could not find container \"b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d\": container with ID starting with b5f79aba7604ccdbca389c47558bc4f60cd6bfb185e9ffb8d1d5c363fc84b84d not found: ID does not exist" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.899172 4852 scope.go:117] "RemoveContainer" containerID="9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049" Jan 29 12:57:20 crc kubenswrapper[4852]: E0129 12:57:20.899729 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049\": container with ID starting with 9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049 not found: ID does not exist" containerID="9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.899772 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049"} err="failed to get container status \"9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049\": rpc error: code = NotFound desc = could not find container \"9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049\": container with ID starting with 9359ea9b24f903204d51d4ddb30fc5b5422dc1d3f78f04197bd491978b61f049 not found: ID does not exist" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.899808 4852 scope.go:117] "RemoveContainer" containerID="12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58" Jan 29 12:57:20 crc kubenswrapper[4852]: E0129 12:57:20.900063 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58\": container with ID starting with 12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58 not found: ID does not exist" containerID="12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58" Jan 29 12:57:20 crc kubenswrapper[4852]: I0129 12:57:20.900093 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58"} err="failed to get container status \"12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58\": rpc error: code = NotFound desc = could not find container \"12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58\": container with ID starting with 12103a38dfb630fab567bc817991b128384ba9439588ac0eada36ef50172ed58 not found: ID does not exist" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.168301 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.362696 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-utilities\") pod \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.363110 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-catalog-content\") pod \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.363216 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlz9h\" (UniqueName: \"kubernetes.io/projected/92a0e805-7a17-41bb-8d08-bf42f4a115ac-kube-api-access-vlz9h\") pod \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\" (UID: \"92a0e805-7a17-41bb-8d08-bf42f4a115ac\") " Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.364266 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-utilities" (OuterVolumeSpecName: "utilities") pod "92a0e805-7a17-41bb-8d08-bf42f4a115ac" (UID: "92a0e805-7a17-41bb-8d08-bf42f4a115ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.368049 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92a0e805-7a17-41bb-8d08-bf42f4a115ac-kube-api-access-vlz9h" (OuterVolumeSpecName: "kube-api-access-vlz9h") pod "92a0e805-7a17-41bb-8d08-bf42f4a115ac" (UID: "92a0e805-7a17-41bb-8d08-bf42f4a115ac"). InnerVolumeSpecName "kube-api-access-vlz9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.465514 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.465544 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlz9h\" (UniqueName: \"kubernetes.io/projected/92a0e805-7a17-41bb-8d08-bf42f4a115ac-kube-api-access-vlz9h\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.481864 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" path="/var/lib/kubelet/pods/49e8e5f1-7c11-41c1-89b1-4a3fb330199b/volumes" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.504040 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92a0e805-7a17-41bb-8d08-bf42f4a115ac" (UID: "92a0e805-7a17-41bb-8d08-bf42f4a115ac"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.567853 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92a0e805-7a17-41bb-8d08-bf42f4a115ac-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.612497 4852 generic.go:334] "Generic (PLEG): container finished" podID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerID="f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804" exitCode=0 Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.612579 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerDied","Data":"f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804"} Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.612645 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p8nnc" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.612667 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8nnc" event={"ID":"92a0e805-7a17-41bb-8d08-bf42f4a115ac","Type":"ContainerDied","Data":"3d943b9e15ba50203679b1ad967945fe7053125ad13ac8e4201ffb9d5543db60"} Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.612691 4852 scope.go:117] "RemoveContainer" containerID="f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.639985 4852 scope.go:117] "RemoveContainer" containerID="3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.664503 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p8nnc"] Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.678987 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p8nnc"] Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.679769 4852 scope.go:117] "RemoveContainer" containerID="54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.704710 4852 scope.go:117] "RemoveContainer" containerID="f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804" Jan 29 12:57:21 crc kubenswrapper[4852]: E0129 12:57:21.705252 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804\": container with ID starting with f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804 not found: ID does not exist" containerID="f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.705296 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804"} err="failed to get container status \"f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804\": rpc error: code = NotFound desc = could not find container \"f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804\": container with ID starting with f0bb0e66730091e5bdd06bc96bc8dfe54b06551a767b8f02858a020ac6931804 not found: ID does not exist" Jan 29 12:57:21 crc 
kubenswrapper[4852]: I0129 12:57:21.705327 4852 scope.go:117] "RemoveContainer" containerID="3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b" Jan 29 12:57:21 crc kubenswrapper[4852]: E0129 12:57:21.705914 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b\": container with ID starting with 3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b not found: ID does not exist" containerID="3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.705942 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b"} err="failed to get container status \"3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b\": rpc error: code = NotFound desc = could not find container \"3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b\": container with ID starting with 3b66437b5b129b943b695891af0f08fea6f747638eb0cca38c6941ff8383bb0b not found: ID does not exist" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.705965 4852 scope.go:117] "RemoveContainer" containerID="54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720" Jan 29 12:57:21 crc kubenswrapper[4852]: E0129 12:57:21.706306 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720\": container with ID starting with 54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720 not found: ID does not exist" containerID="54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720" Jan 29 12:57:21 crc kubenswrapper[4852]: I0129 12:57:21.706333 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720"} err="failed to get container status \"54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720\": rpc error: code = NotFound desc = could not find container \"54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720\": container with ID starting with 54e3629a934fd9cd6ef7db375966e5c2eef1ec677941a652e2f8991cd52eb720 not found: ID does not exist" Jan 29 12:57:23 crc kubenswrapper[4852]: I0129 12:57:23.475862 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" path="/var/lib/kubelet/pods/92a0e805-7a17-41bb-8d08-bf42f4a115ac/volumes" Jan 29 12:58:00 crc kubenswrapper[4852]: I0129 12:58:00.017701 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:58:00 crc kubenswrapper[4852]: I0129 12:58:00.018210 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:58:21 crc kubenswrapper[4852]: I0129 12:58:21.282136 4852 generic.go:334] "Generic (PLEG): 
container finished" podID="bd812255-d120-4225-98da-716373cacd08" containerID="37c378aef0c4e9183f3f664b695b81ee65def9e0cdb848f6445021e2d2ce0a05" exitCode=0 Jan 29 12:58:21 crc kubenswrapper[4852]: I0129 12:58:21.282209 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" event={"ID":"bd812255-d120-4225-98da-716373cacd08","Type":"ContainerDied","Data":"37c378aef0c4e9183f3f664b695b81ee65def9e0cdb848f6445021e2d2ce0a05"} Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.785119 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.890980 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-combined-ca-bundle\") pod \"bd812255-d120-4225-98da-716373cacd08\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.891152 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-secret-0\") pod \"bd812255-d120-4225-98da-716373cacd08\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.891172 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-inventory\") pod \"bd812255-d120-4225-98da-716373cacd08\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.891260 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ssh-key-openstack-cell1\") pod \"bd812255-d120-4225-98da-716373cacd08\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.891282 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwnh8\" (UniqueName: \"kubernetes.io/projected/bd812255-d120-4225-98da-716373cacd08-kube-api-access-xwnh8\") pod \"bd812255-d120-4225-98da-716373cacd08\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.891305 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ceph\") pod \"bd812255-d120-4225-98da-716373cacd08\" (UID: \"bd812255-d120-4225-98da-716373cacd08\") " Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.898017 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "bd812255-d120-4225-98da-716373cacd08" (UID: "bd812255-d120-4225-98da-716373cacd08"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.898408 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd812255-d120-4225-98da-716373cacd08-kube-api-access-xwnh8" (OuterVolumeSpecName: "kube-api-access-xwnh8") pod "bd812255-d120-4225-98da-716373cacd08" (UID: "bd812255-d120-4225-98da-716373cacd08"). InnerVolumeSpecName "kube-api-access-xwnh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.903265 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ceph" (OuterVolumeSpecName: "ceph") pod "bd812255-d120-4225-98da-716373cacd08" (UID: "bd812255-d120-4225-98da-716373cacd08"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.925567 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "bd812255-d120-4225-98da-716373cacd08" (UID: "bd812255-d120-4225-98da-716373cacd08"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.939830 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-inventory" (OuterVolumeSpecName: "inventory") pod "bd812255-d120-4225-98da-716373cacd08" (UID: "bd812255-d120-4225-98da-716373cacd08"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.954686 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "bd812255-d120-4225-98da-716373cacd08" (UID: "bd812255-d120-4225-98da-716373cacd08"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.994384 4852 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.994418 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.994430 4852 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.994438 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.994448 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwnh8\" (UniqueName: \"kubernetes.io/projected/bd812255-d120-4225-98da-716373cacd08-kube-api-access-xwnh8\") on node \"crc\" DevicePath \"\"" Jan 29 12:58:22 crc kubenswrapper[4852]: I0129 12:58:22.994456 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd812255-d120-4225-98da-716373cacd08-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.314447 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" event={"ID":"bd812255-d120-4225-98da-716373cacd08","Type":"ContainerDied","Data":"b02ddd53965fdef91352648510aab629ff643cae99ff87c42a7612abd126d265"} Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.314499 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b02ddd53965fdef91352648510aab629ff643cae99ff87c42a7612abd126d265" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.314570 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gqh6l" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.420903 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-snxf7"] Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421453 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="extract-utilities" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421469 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="extract-utilities" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421482 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd812255-d120-4225-98da-716373cacd08" containerName="libvirt-openstack-openstack-cell1" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421488 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd812255-d120-4225-98da-716373cacd08" containerName="libvirt-openstack-openstack-cell1" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421501 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421507 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421522 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="extract-utilities" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421528 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="extract-utilities" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421536 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="extract-content" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421543 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="extract-content" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421555 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421560 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421572 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="extract-utilities" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421598 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="extract-utilities" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421616 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="extract-content" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421621 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="extract-content" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421634 4852 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421640 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: E0129 12:58:23.421652 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="extract-content" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421657 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="extract-content" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421862 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="92a0e805-7a17-41bb-8d08-bf42f4a115ac" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421877 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b94185d-c5da-4bb7-8fad-75d4b5cbef9b" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421889 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd812255-d120-4225-98da-716373cacd08" containerName="libvirt-openstack-openstack-cell1" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.421906 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="49e8e5f1-7c11-41c1-89b1-4a3fb330199b" containerName="registry-server" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.422752 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.425801 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.426035 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.426123 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.426149 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.426129 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.426215 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.426521 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.432945 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-snxf7"] Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609191 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-inventory\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " 
pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609326 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609364 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p22jp\" (UniqueName: \"kubernetes.io/projected/90675cdc-8008-4d42-8f75-e0f67752eef4-kube-api-access-p22jp\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609575 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609716 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609824 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.609926 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.610055 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.610260 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-1\") pod 
\"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.610360 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ceph\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.610513 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.712484 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.712904 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.713159 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ceph\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.713384 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.713717 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-inventory\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.714142 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " 
pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.714398 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p22jp\" (UniqueName: \"kubernetes.io/projected/90675cdc-8008-4d42-8f75-e0f67752eef4-kube-api-access-p22jp\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.714830 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.715138 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.715470 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.715750 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.715259 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.715767 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.720897 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ceph\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.721414 
4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.721628 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.722144 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.723003 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.724285 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.729778 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.731623 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-inventory\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.754802 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p22jp\" (UniqueName: \"kubernetes.io/projected/90675cdc-8008-4d42-8f75-e0f67752eef4-kube-api-access-p22jp\") pod \"nova-cell1-openstack-openstack-cell1-snxf7\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:23 crc kubenswrapper[4852]: I0129 12:58:23.763554 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 12:58:24 crc kubenswrapper[4852]: I0129 12:58:24.308768 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-snxf7"] Jan 29 12:58:25 crc kubenswrapper[4852]: I0129 12:58:25.340158 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" event={"ID":"90675cdc-8008-4d42-8f75-e0f67752eef4","Type":"ContainerStarted","Data":"d11ee5e1d95f472f0f7fc0ed9a7ce434ecda22ac2e9d1f91d1bce7304f475f93"} Jan 29 12:58:25 crc kubenswrapper[4852]: I0129 12:58:25.340430 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" event={"ID":"90675cdc-8008-4d42-8f75-e0f67752eef4","Type":"ContainerStarted","Data":"f4bdd6564099a3c56952d07ef151fad821c19e68d1bc025bdf29cac9b2f32c39"} Jan 29 12:58:25 crc kubenswrapper[4852]: I0129 12:58:25.373795 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" podStartSLOduration=1.827422356 podStartE2EDuration="2.373770901s" podCreationTimestamp="2026-01-29 12:58:23 +0000 UTC" firstStartedPulling="2026-01-29 12:58:24.321516444 +0000 UTC m=+8201.538847578" lastFinishedPulling="2026-01-29 12:58:24.867864969 +0000 UTC m=+8202.085196123" observedRunningTime="2026-01-29 12:58:25.369240261 +0000 UTC m=+8202.586571415" watchObservedRunningTime="2026-01-29 12:58:25.373770901 +0000 UTC m=+8202.591102035" Jan 29 12:58:30 crc kubenswrapper[4852]: I0129 12:58:30.017395 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:58:30 crc kubenswrapper[4852]: I0129 12:58:30.018213 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.017337 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.017995 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.018060 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.019124 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"429f7a973e23a9b6211e73f4246866dd1a967f8348dcf23a797b492e1c452f76"} 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.019194 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://429f7a973e23a9b6211e73f4246866dd1a967f8348dcf23a797b492e1c452f76" gracePeriod=600 Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.790099 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="429f7a973e23a9b6211e73f4246866dd1a967f8348dcf23a797b492e1c452f76" exitCode=0 Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.790157 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"429f7a973e23a9b6211e73f4246866dd1a967f8348dcf23a797b492e1c452f76"} Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.790495 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6"} Jan 29 12:59:00 crc kubenswrapper[4852]: I0129 12:59:00.790534 4852 scope.go:117] "RemoveContainer" containerID="5d788b91958f259c0349bfc4bd51079feb03681d6dd945324b89240de9aece35" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.177641 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9"] Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.180735 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.183736 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.183957 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.191526 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9"] Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.316630 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-config-volume\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.316864 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-secret-volume\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.316896 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt6zb\" (UniqueName: \"kubernetes.io/projected/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-kube-api-access-wt6zb\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.420298 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-secret-volume\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.420375 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt6zb\" (UniqueName: \"kubernetes.io/projected/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-kube-api-access-wt6zb\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.420630 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-config-volume\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.421927 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-config-volume\") pod 
\"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.431453 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-secret-volume\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.439548 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt6zb\" (UniqueName: \"kubernetes.io/projected/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-kube-api-access-wt6zb\") pod \"collect-profiles-29494860-mg6b9\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.507768 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:00 crc kubenswrapper[4852]: I0129 13:00:00.998490 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9"] Jan 29 13:00:01 crc kubenswrapper[4852]: I0129 13:00:01.596674 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" event={"ID":"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52","Type":"ContainerStarted","Data":"712a9337ded936cd305ea50a27e177e54335384dd3a78bad8bc0288c3f065a9d"} Jan 29 13:00:01 crc kubenswrapper[4852]: I0129 13:00:01.597028 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" event={"ID":"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52","Type":"ContainerStarted","Data":"b62e73df34ea5f4318129f16213e0195ac8bc0787da6c725b22b13199049544a"} Jan 29 13:00:01 crc kubenswrapper[4852]: I0129 13:00:01.616804 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" podStartSLOduration=1.6167503399999998 podStartE2EDuration="1.61675034s" podCreationTimestamp="2026-01-29 13:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:00:01.610822866 +0000 UTC m=+8298.828154040" watchObservedRunningTime="2026-01-29 13:00:01.61675034 +0000 UTC m=+8298.834081504" Jan 29 13:00:02 crc kubenswrapper[4852]: I0129 13:00:02.608146 4852 generic.go:334] "Generic (PLEG): container finished" podID="6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" containerID="712a9337ded936cd305ea50a27e177e54335384dd3a78bad8bc0288c3f065a9d" exitCode=0 Jan 29 13:00:02 crc kubenswrapper[4852]: I0129 13:00:02.608262 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" event={"ID":"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52","Type":"ContainerDied","Data":"712a9337ded936cd305ea50a27e177e54335384dd3a78bad8bc0288c3f065a9d"} Jan 29 13:00:03 crc kubenswrapper[4852]: I0129 13:00:03.993487 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.108046 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-secret-volume\") pod \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.108455 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-config-volume\") pod \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.108724 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wt6zb\" (UniqueName: \"kubernetes.io/projected/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-kube-api-access-wt6zb\") pod \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\" (UID: \"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52\") " Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.109131 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-config-volume" (OuterVolumeSpecName: "config-volume") pod "6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" (UID: "6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.113860 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-kube-api-access-wt6zb" (OuterVolumeSpecName: "kube-api-access-wt6zb") pod "6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" (UID: "6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52"). InnerVolumeSpecName "kube-api-access-wt6zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.113977 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" (UID: "6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.211743 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.211786 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wt6zb\" (UniqueName: \"kubernetes.io/projected/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-kube-api-access-wt6zb\") on node \"crc\" DevicePath \"\"" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.211799 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.632254 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" event={"ID":"6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52","Type":"ContainerDied","Data":"b62e73df34ea5f4318129f16213e0195ac8bc0787da6c725b22b13199049544a"} Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.632668 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b62e73df34ea5f4318129f16213e0195ac8bc0787da6c725b22b13199049544a" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.632338 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494860-mg6b9" Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.717764 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn"] Jan 29 13:00:04 crc kubenswrapper[4852]: I0129 13:00:04.728388 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-8x2hn"] Jan 29 13:00:05 crc kubenswrapper[4852]: I0129 13:00:05.475850 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10d09a8a-3459-4001-a749-d405816b1b52" path="/var/lib/kubelet/pods/10d09a8a-3459-4001-a749-d405816b1b52/volumes" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.841693 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7r7xt"] Jan 29 13:00:18 crc kubenswrapper[4852]: E0129 13:00:18.843311 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" containerName="collect-profiles" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.843332 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" containerName="collect-profiles" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.843537 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a4c93ee-1caa-4ae6-97ce-bfee0bd9cc52" containerName="collect-profiles" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.845600 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.864855 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7r7xt"] Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.976198 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-catalog-content\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.976315 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlhhf\" (UniqueName: \"kubernetes.io/projected/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-kube-api-access-vlhhf\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:18 crc kubenswrapper[4852]: I0129 13:00:18.976462 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-utilities\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.078942 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-utilities\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.079222 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-catalog-content\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.079334 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlhhf\" (UniqueName: \"kubernetes.io/projected/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-kube-api-access-vlhhf\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.079966 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-utilities\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.080137 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-catalog-content\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.100779 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vlhhf\" (UniqueName: \"kubernetes.io/projected/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-kube-api-access-vlhhf\") pod \"certified-operators-7r7xt\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.171113 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.715777 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7r7xt"] Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.969686 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerStarted","Data":"6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34"} Jan 29 13:00:19 crc kubenswrapper[4852]: I0129 13:00:19.970160 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerStarted","Data":"ff4db70caa2e6c9a868d982a243dbc7c2f4a8b50f1f4d9eb0bcb62567c80d3d9"} Jan 29 13:00:20 crc kubenswrapper[4852]: I0129 13:00:20.990940 4852 generic.go:334] "Generic (PLEG): container finished" podID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerID="6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34" exitCode=0 Jan 29 13:00:20 crc kubenswrapper[4852]: I0129 13:00:20.991005 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerDied","Data":"6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34"} Jan 29 13:00:20 crc kubenswrapper[4852]: I0129 13:00:20.995025 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:00:22 crc kubenswrapper[4852]: I0129 13:00:22.005839 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerStarted","Data":"c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b"} Jan 29 13:00:24 crc kubenswrapper[4852]: I0129 13:00:24.030213 4852 generic.go:334] "Generic (PLEG): container finished" podID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerID="c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b" exitCode=0 Jan 29 13:00:24 crc kubenswrapper[4852]: I0129 13:00:24.030297 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerDied","Data":"c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b"} Jan 29 13:00:25 crc kubenswrapper[4852]: I0129 13:00:25.043987 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerStarted","Data":"0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a"} Jan 29 13:00:25 crc kubenswrapper[4852]: I0129 13:00:25.072538 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7r7xt" podStartSLOduration=3.567657893 podStartE2EDuration="7.07251536s" 
podCreationTimestamp="2026-01-29 13:00:18 +0000 UTC" firstStartedPulling="2026-01-29 13:00:20.994800872 +0000 UTC m=+8318.212132006" lastFinishedPulling="2026-01-29 13:00:24.499658339 +0000 UTC m=+8321.716989473" observedRunningTime="2026-01-29 13:00:25.066064764 +0000 UTC m=+8322.283395908" watchObservedRunningTime="2026-01-29 13:00:25.07251536 +0000 UTC m=+8322.289846494" Jan 29 13:00:29 crc kubenswrapper[4852]: I0129 13:00:29.171389 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:29 crc kubenswrapper[4852]: I0129 13:00:29.172084 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:29 crc kubenswrapper[4852]: I0129 13:00:29.245889 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:30 crc kubenswrapper[4852]: I0129 13:00:30.149472 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:30 crc kubenswrapper[4852]: I0129 13:00:30.208056 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7r7xt"] Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.112893 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7r7xt" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="registry-server" containerID="cri-o://0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a" gracePeriod=2 Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.636367 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.821166 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-utilities\") pod \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.821371 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlhhf\" (UniqueName: \"kubernetes.io/projected/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-kube-api-access-vlhhf\") pod \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.821474 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-catalog-content\") pod \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\" (UID: \"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7\") " Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.822034 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-utilities" (OuterVolumeSpecName: "utilities") pod "2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" (UID: "2f019ca6-3f6f-4a4c-8f82-ab267142eeb7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.822727 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.832323 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-kube-api-access-vlhhf" (OuterVolumeSpecName: "kube-api-access-vlhhf") pod "2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" (UID: "2f019ca6-3f6f-4a4c-8f82-ab267142eeb7"). InnerVolumeSpecName "kube-api-access-vlhhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:00:32 crc kubenswrapper[4852]: I0129 13:00:32.925067 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlhhf\" (UniqueName: \"kubernetes.io/projected/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-kube-api-access-vlhhf\") on node \"crc\" DevicePath \"\"" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.125480 4852 generic.go:334] "Generic (PLEG): container finished" podID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerID="0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a" exitCode=0 Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.125544 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerDied","Data":"0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a"} Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.125574 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7r7xt" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.125613 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7r7xt" event={"ID":"2f019ca6-3f6f-4a4c-8f82-ab267142eeb7","Type":"ContainerDied","Data":"ff4db70caa2e6c9a868d982a243dbc7c2f4a8b50f1f4d9eb0bcb62567c80d3d9"} Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.125644 4852 scope.go:117] "RemoveContainer" containerID="0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.151569 4852 scope.go:117] "RemoveContainer" containerID="c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.178104 4852 scope.go:117] "RemoveContainer" containerID="6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.246292 4852 scope.go:117] "RemoveContainer" containerID="0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a" Jan 29 13:00:33 crc kubenswrapper[4852]: E0129 13:00:33.246920 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a\": container with ID starting with 0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a not found: ID does not exist" containerID="0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.247008 4852 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a"} err="failed to get container status \"0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a\": rpc error: code = NotFound desc = could not find container \"0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a\": container with ID starting with 0135e15dec91b2f0b55cbe79348e52ef6d8b2b0640abc03cf2adaf9e16c1678a not found: ID does not exist" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.247044 4852 scope.go:117] "RemoveContainer" containerID="c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b" Jan 29 13:00:33 crc kubenswrapper[4852]: E0129 13:00:33.248267 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b\": container with ID starting with c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b not found: ID does not exist" containerID="c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.248309 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b"} err="failed to get container status \"c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b\": rpc error: code = NotFound desc = could not find container \"c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b\": container with ID starting with c6394831b3045945aa8222680c8436f30e43ed8dfa4dd8461fb6b3f9175fb52b not found: ID does not exist" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.248336 4852 scope.go:117] "RemoveContainer" containerID="6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34" Jan 29 13:00:33 crc kubenswrapper[4852]: E0129 13:00:33.249398 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34\": container with ID starting with 6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34 not found: ID does not exist" containerID="6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.249444 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34"} err="failed to get container status \"6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34\": rpc error: code = NotFound desc = could not find container \"6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34\": container with ID starting with 6d0b449e480aa4bbb39aec3419336d3fce9c9065b1ec66410fc7674985a6ca34 not found: ID does not exist" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.380490 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" (UID: "2f019ca6-3f6f-4a4c-8f82-ab267142eeb7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.435817 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.476947 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7r7xt"] Jan 29 13:00:33 crc kubenswrapper[4852]: I0129 13:00:33.497132 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7r7xt"] Jan 29 13:00:35 crc kubenswrapper[4852]: I0129 13:00:35.475117 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" path="/var/lib/kubelet/pods/2f019ca6-3f6f-4a4c-8f82-ab267142eeb7/volumes" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.017382 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.017985 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.152794 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29494861-dcvxd"] Jan 29 13:01:00 crc kubenswrapper[4852]: E0129 13:01:00.153259 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="extract-content" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.153277 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="extract-content" Jan 29 13:01:00 crc kubenswrapper[4852]: E0129 13:01:00.153298 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="registry-server" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.153305 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="registry-server" Jan 29 13:01:00 crc kubenswrapper[4852]: E0129 13:01:00.153331 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="extract-utilities" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.153337 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="extract-utilities" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.153531 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f019ca6-3f6f-4a4c-8f82-ab267142eeb7" containerName="registry-server" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.154436 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.164630 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29494861-dcvxd"] Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.300267 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-fernet-keys\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.300526 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-combined-ca-bundle\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.300821 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-config-data\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.301155 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zx52\" (UniqueName: \"kubernetes.io/projected/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-kube-api-access-5zx52\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.403032 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zx52\" (UniqueName: \"kubernetes.io/projected/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-kube-api-access-5zx52\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.403110 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-fernet-keys\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.403177 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-combined-ca-bundle\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.403241 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-config-data\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.409394 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-config-data\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.410441 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-combined-ca-bundle\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.421024 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zx52\" (UniqueName: \"kubernetes.io/projected/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-kube-api-access-5zx52\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.431056 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-fernet-keys\") pod \"keystone-cron-29494861-dcvxd\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.483203 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:00 crc kubenswrapper[4852]: I0129 13:01:00.955884 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29494861-dcvxd"] Jan 29 13:01:01 crc kubenswrapper[4852]: I0129 13:01:01.456317 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494861-dcvxd" event={"ID":"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8","Type":"ContainerStarted","Data":"56616e92665bf17e0ff04411dff2d2f8efc71e90fce5df1d0b7b5fff79f3b20c"} Jan 29 13:01:01 crc kubenswrapper[4852]: I0129 13:01:01.456721 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494861-dcvxd" event={"ID":"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8","Type":"ContainerStarted","Data":"0ce48e4c0428a19486d5bcb5be47b3dd487f95b88a9d65d1d40598f697000f88"} Jan 29 13:01:04 crc kubenswrapper[4852]: I0129 13:01:04.489863 4852 generic.go:334] "Generic (PLEG): container finished" podID="dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" containerID="56616e92665bf17e0ff04411dff2d2f8efc71e90fce5df1d0b7b5fff79f3b20c" exitCode=0 Jan 29 13:01:04 crc kubenswrapper[4852]: I0129 13:01:04.490448 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494861-dcvxd" event={"ID":"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8","Type":"ContainerDied","Data":"56616e92665bf17e0ff04411dff2d2f8efc71e90fce5df1d0b7b5fff79f3b20c"} Jan 29 13:01:05 crc kubenswrapper[4852]: I0129 13:01:05.404508 4852 scope.go:117] "RemoveContainer" containerID="38d05d57b84e632f95a1aa9a5b62365dd85af5e7b6d56ab4dd850ad69f5e07da" Jan 29 13:01:05 crc kubenswrapper[4852]: I0129 13:01:05.922214 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.048020 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-config-data\") pod \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.048131 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zx52\" (UniqueName: \"kubernetes.io/projected/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-kube-api-access-5zx52\") pod \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.048295 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-combined-ca-bundle\") pod \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.048442 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-fernet-keys\") pod \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\" (UID: \"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8\") " Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.053675 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-kube-api-access-5zx52" (OuterVolumeSpecName: "kube-api-access-5zx52") pod "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" (UID: "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8"). InnerVolumeSpecName "kube-api-access-5zx52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.068004 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" (UID: "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.081241 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" (UID: "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.113890 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-config-data" (OuterVolumeSpecName: "config-data") pod "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" (UID: "dbd0f679-8736-4fc7-a3e6-7b0d58e325e8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.151012 4852 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.151054 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.151070 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zx52\" (UniqueName: \"kubernetes.io/projected/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-kube-api-access-5zx52\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.151085 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbd0f679-8736-4fc7-a3e6-7b0d58e325e8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.510909 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494861-dcvxd" event={"ID":"dbd0f679-8736-4fc7-a3e6-7b0d58e325e8","Type":"ContainerDied","Data":"0ce48e4c0428a19486d5bcb5be47b3dd487f95b88a9d65d1d40598f697000f88"} Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.510951 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ce48e4c0428a19486d5bcb5be47b3dd487f95b88a9d65d1d40598f697000f88" Jan 29 13:01:06 crc kubenswrapper[4852]: I0129 13:01:06.511034 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29494861-dcvxd" Jan 29 13:01:08 crc kubenswrapper[4852]: I0129 13:01:08.536531 4852 generic.go:334] "Generic (PLEG): container finished" podID="90675cdc-8008-4d42-8f75-e0f67752eef4" containerID="d11ee5e1d95f472f0f7fc0ed9a7ce434ecda22ac2e9d1f91d1bce7304f475f93" exitCode=0 Jan 29 13:01:08 crc kubenswrapper[4852]: I0129 13:01:08.536680 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" event={"ID":"90675cdc-8008-4d42-8f75-e0f67752eef4","Type":"ContainerDied","Data":"d11ee5e1d95f472f0f7fc0ed9a7ce434ecda22ac2e9d1f91d1bce7304f475f93"} Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.088750 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244012 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-1\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244186 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-1\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244328 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ceph\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244370 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-0\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244420 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-combined-ca-bundle\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244500 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p22jp\" (UniqueName: \"kubernetes.io/projected/90675cdc-8008-4d42-8f75-e0f67752eef4-kube-api-access-p22jp\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244618 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-1\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244684 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-0\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244718 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-0\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244771 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ssh-key-openstack-cell1\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.244824 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-inventory\") pod \"90675cdc-8008-4d42-8f75-e0f67752eef4\" (UID: \"90675cdc-8008-4d42-8f75-e0f67752eef4\") " Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.264927 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ceph" (OuterVolumeSpecName: "ceph") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.265622 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90675cdc-8008-4d42-8f75-e0f67752eef4-kube-api-access-p22jp" (OuterVolumeSpecName: "kube-api-access-p22jp") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "kube-api-access-p22jp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.276050 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.284695 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.292907 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.301395 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.301560 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.304916 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.307143 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.319821 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.335773 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-inventory" (OuterVolumeSpecName: "inventory") pod "90675cdc-8008-4d42-8f75-e0f67752eef4" (UID: "90675cdc-8008-4d42-8f75-e0f67752eef4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352162 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352201 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352211 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352222 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352234 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352245 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p22jp\" (UniqueName: \"kubernetes.io/projected/90675cdc-8008-4d42-8f75-e0f67752eef4-kube-api-access-p22jp\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352256 4852 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352267 4852 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352275 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352282 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.352293 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90675cdc-8008-4d42-8f75-e0f67752eef4-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.562872 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" event={"ID":"90675cdc-8008-4d42-8f75-e0f67752eef4","Type":"ContainerDied","Data":"f4bdd6564099a3c56952d07ef151fad821c19e68d1bc025bdf29cac9b2f32c39"} Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.563230 4852 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4bdd6564099a3c56952d07ef151fad821c19e68d1bc025bdf29cac9b2f32c39" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.562976 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-snxf7" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.658552 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-82xj9"] Jan 29 13:01:10 crc kubenswrapper[4852]: E0129 13:01:10.659082 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" containerName="keystone-cron" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.659100 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" containerName="keystone-cron" Jan 29 13:01:10 crc kubenswrapper[4852]: E0129 13:01:10.659130 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90675cdc-8008-4d42-8f75-e0f67752eef4" containerName="nova-cell1-openstack-openstack-cell1" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.659137 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="90675cdc-8008-4d42-8f75-e0f67752eef4" containerName="nova-cell1-openstack-openstack-cell1" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.659366 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbd0f679-8736-4fc7-a3e6-7b0d58e325e8" containerName="keystone-cron" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.659389 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="90675cdc-8008-4d42-8f75-e0f67752eef4" containerName="nova-cell1-openstack-openstack-cell1" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.661023 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.667522 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.667779 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.667892 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.668104 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.668195 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.677351 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-82xj9"] Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.772949 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ssh-key-openstack-cell1\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773012 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceph\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773099 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-inventory\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773120 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkzxv\" (UniqueName: \"kubernetes.io/projected/e7651f11-224f-4da6-92f6-fc6322e79044-kube-api-access-fkzxv\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773145 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773192 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: 
\"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773216 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.773274 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879370 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879446 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ssh-key-openstack-cell1\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879484 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceph\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879594 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-inventory\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879615 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkzxv\" (UniqueName: \"kubernetes.io/projected/e7651f11-224f-4da6-92f6-fc6322e79044-kube-api-access-fkzxv\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879637 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: 
\"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879714 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.879753 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.887115 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ssh-key-openstack-cell1\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.887226 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.888997 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.889988 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceph\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.891309 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.891823 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-2\") pod 
\"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.894252 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-inventory\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.899147 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkzxv\" (UniqueName: \"kubernetes.io/projected/e7651f11-224f-4da6-92f6-fc6322e79044-kube-api-access-fkzxv\") pod \"telemetry-openstack-openstack-cell1-82xj9\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:10 crc kubenswrapper[4852]: I0129 13:01:10.987864 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:01:11 crc kubenswrapper[4852]: I0129 13:01:11.587465 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-82xj9"] Jan 29 13:01:12 crc kubenswrapper[4852]: I0129 13:01:12.583089 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" event={"ID":"e7651f11-224f-4da6-92f6-fc6322e79044","Type":"ContainerStarted","Data":"4b2bdef1c61ba20237c0b5f936a82e37502f830ac3af56f44c6f9b05d3eca694"} Jan 29 13:01:12 crc kubenswrapper[4852]: I0129 13:01:12.583864 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" event={"ID":"e7651f11-224f-4da6-92f6-fc6322e79044","Type":"ContainerStarted","Data":"8970141adcd1ad1290b36b44a8f195b45b6a06f69d5de96eabff1346c9ac6905"} Jan 29 13:01:12 crc kubenswrapper[4852]: I0129 13:01:12.614422 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" podStartSLOduration=2.056677571 podStartE2EDuration="2.614390914s" podCreationTimestamp="2026-01-29 13:01:10 +0000 UTC" firstStartedPulling="2026-01-29 13:01:11.581491519 +0000 UTC m=+8368.798822643" lastFinishedPulling="2026-01-29 13:01:12.139204842 +0000 UTC m=+8369.356535986" observedRunningTime="2026-01-29 13:01:12.599698697 +0000 UTC m=+8369.817029871" watchObservedRunningTime="2026-01-29 13:01:12.614390914 +0000 UTC m=+8369.831722068" Jan 29 13:01:30 crc kubenswrapper[4852]: I0129 13:01:30.016559 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:01:30 crc kubenswrapper[4852]: I0129 13:01:30.017055 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:02:00 crc kubenswrapper[4852]: I0129 13:02:00.017793 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:02:00 crc kubenswrapper[4852]: I0129 13:02:00.018562 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:02:00 crc kubenswrapper[4852]: I0129 13:02:00.018682 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 13:02:00 crc kubenswrapper[4852]: I0129 13:02:00.020051 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:02:00 crc kubenswrapper[4852]: I0129 13:02:00.020180 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" gracePeriod=600 Jan 29 13:02:00 crc kubenswrapper[4852]: E0129 13:02:00.167476 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:02:01 crc kubenswrapper[4852]: I0129 13:02:01.139930 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" exitCode=0 Jan 29 13:02:01 crc kubenswrapper[4852]: I0129 13:02:01.140000 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6"} Jan 29 13:02:01 crc kubenswrapper[4852]: I0129 13:02:01.140036 4852 scope.go:117] "RemoveContainer" containerID="429f7a973e23a9b6211e73f4246866dd1a967f8348dcf23a797b492e1c452f76" Jan 29 13:02:01 crc kubenswrapper[4852]: I0129 13:02:01.140961 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:02:01 crc kubenswrapper[4852]: E0129 13:02:01.141807 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" 
Jan 29 13:02:15 crc kubenswrapper[4852]: I0129 13:02:15.465151 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:02:15 crc kubenswrapper[4852]: E0129 13:02:15.467234 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:02:27 crc kubenswrapper[4852]: I0129 13:02:27.463184 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:02:27 crc kubenswrapper[4852]: E0129 13:02:27.463862 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:02:38 crc kubenswrapper[4852]: I0129 13:02:38.464557 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:02:38 crc kubenswrapper[4852]: E0129 13:02:38.465366 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:02:52 crc kubenswrapper[4852]: I0129 13:02:52.466344 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:02:52 crc kubenswrapper[4852]: E0129 13:02:52.467791 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:03:04 crc kubenswrapper[4852]: I0129 13:03:04.463708 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:03:04 crc kubenswrapper[4852]: E0129 13:03:04.464409 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:03:05 crc kubenswrapper[4852]: I0129 13:03:05.516251 4852 scope.go:117] "RemoveContainer" containerID="aeb41be860402265afd01444d42c4ec73b13735c0da4e256fe8d81d1261b2396" Jan 29 13:03:05 
crc kubenswrapper[4852]: I0129 13:03:05.557843 4852 scope.go:117] "RemoveContainer" containerID="8b93f4670e0f3274957791bd1af1d0c496ab9586168f3bbc6d3a1f5b3555dac5" Jan 29 13:03:05 crc kubenswrapper[4852]: I0129 13:03:05.607682 4852 scope.go:117] "RemoveContainer" containerID="4c5e3a79d0b500faad764ba56c82ff82ce29553b2a6a6fb2296309be440b4d97" Jan 29 13:03:19 crc kubenswrapper[4852]: I0129 13:03:19.464319 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:03:19 crc kubenswrapper[4852]: E0129 13:03:19.465195 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:03:32 crc kubenswrapper[4852]: I0129 13:03:32.464122 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:03:32 crc kubenswrapper[4852]: E0129 13:03:32.465357 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:03:45 crc kubenswrapper[4852]: I0129 13:03:45.464073 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:03:45 crc kubenswrapper[4852]: E0129 13:03:45.464909 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:03:58 crc kubenswrapper[4852]: I0129 13:03:58.465226 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:03:58 crc kubenswrapper[4852]: E0129 13:03:58.466227 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:04:12 crc kubenswrapper[4852]: I0129 13:04:12.464432 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:04:12 crc kubenswrapper[4852]: E0129 13:04:12.465574 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:04:26 crc kubenswrapper[4852]: I0129 13:04:26.464495 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:04:26 crc kubenswrapper[4852]: E0129 13:04:26.465686 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:04:38 crc kubenswrapper[4852]: I0129 13:04:38.464238 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:04:38 crc kubenswrapper[4852]: E0129 13:04:38.465570 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:04:38 crc kubenswrapper[4852]: I0129 13:04:38.955092 4852 generic.go:334] "Generic (PLEG): container finished" podID="e7651f11-224f-4da6-92f6-fc6322e79044" containerID="4b2bdef1c61ba20237c0b5f936a82e37502f830ac3af56f44c6f9b05d3eca694" exitCode=0 Jan 29 13:04:38 crc kubenswrapper[4852]: I0129 13:04:38.955141 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" event={"ID":"e7651f11-224f-4da6-92f6-fc6322e79044","Type":"ContainerDied","Data":"4b2bdef1c61ba20237c0b5f936a82e37502f830ac3af56f44c6f9b05d3eca694"} Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.587778 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748403 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-1\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748453 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceph\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748473 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-0\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748702 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkzxv\" (UniqueName: \"kubernetes.io/projected/e7651f11-224f-4da6-92f6-fc6322e79044-kube-api-access-fkzxv\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748789 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ssh-key-openstack-cell1\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748838 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-inventory\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748863 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-telemetry-combined-ca-bundle\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.748935 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-2\") pod \"e7651f11-224f-4da6-92f6-fc6322e79044\" (UID: \"e7651f11-224f-4da6-92f6-fc6322e79044\") " Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.755217 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7651f11-224f-4da6-92f6-fc6322e79044-kube-api-access-fkzxv" (OuterVolumeSpecName: "kube-api-access-fkzxv") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "kube-api-access-fkzxv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.755221 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.755842 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceph" (OuterVolumeSpecName: "ceph") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.791553 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-inventory" (OuterVolumeSpecName: "inventory") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.809123 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.810858 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.822158 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.823248 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "e7651f11-224f-4da6-92f6-fc6322e79044" (UID: "e7651f11-224f-4da6-92f6-fc6322e79044"). InnerVolumeSpecName "ceilometer-compute-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851602 4852 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851631 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851643 4852 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851656 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkzxv\" (UniqueName: \"kubernetes.io/projected/e7651f11-224f-4da6-92f6-fc6322e79044-kube-api-access-fkzxv\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851666 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851677 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851686 4852 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.851694 4852 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e7651f11-224f-4da6-92f6-fc6322e79044-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.978264 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" event={"ID":"e7651f11-224f-4da6-92f6-fc6322e79044","Type":"ContainerDied","Data":"8970141adcd1ad1290b36b44a8f195b45b6a06f69d5de96eabff1346c9ac6905"} Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.978556 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8970141adcd1ad1290b36b44a8f195b45b6a06f69d5de96eabff1346c9ac6905" Jan 29 13:04:40 crc kubenswrapper[4852]: I0129 13:04:40.978298 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-82xj9" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.078756 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-rpffn"] Jan 29 13:04:41 crc kubenswrapper[4852]: E0129 13:04:41.079336 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7651f11-224f-4da6-92f6-fc6322e79044" containerName="telemetry-openstack-openstack-cell1" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.079357 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7651f11-224f-4da6-92f6-fc6322e79044" containerName="telemetry-openstack-openstack-cell1" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.079560 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7651f11-224f-4da6-92f6-fc6322e79044" containerName="telemetry-openstack-openstack-cell1" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.080296 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.086295 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.086433 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.086448 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.086541 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.086922 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.108176 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-rpffn"] Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.261213 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.261291 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ssh-key-openstack-cell1\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.261337 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95cth\" (UniqueName: \"kubernetes.io/projected/82d924c3-a1a8-425d-8ccd-f83a7053b057-kube-api-access-95cth\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc 
kubenswrapper[4852]: I0129 13:04:41.261356 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.261427 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.261447 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.362787 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.362830 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.362960 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.362992 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ssh-key-openstack-cell1\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.363034 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95cth\" (UniqueName: \"kubernetes.io/projected/82d924c3-a1a8-425d-8ccd-f83a7053b057-kube-api-access-95cth\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 
13:04:41.363053 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.367230 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ssh-key-openstack-cell1\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.367430 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.367920 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.368339 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.368900 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.381132 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95cth\" (UniqueName: \"kubernetes.io/projected/82d924c3-a1a8-425d-8ccd-f83a7053b057-kube-api-access-95cth\") pod \"neutron-sriov-openstack-openstack-cell1-rpffn\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.408468 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:04:41 crc kubenswrapper[4852]: I0129 13:04:41.980282 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-rpffn"] Jan 29 13:04:43 crc kubenswrapper[4852]: I0129 13:04:42.999462 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" event={"ID":"82d924c3-a1a8-425d-8ccd-f83a7053b057","Type":"ContainerStarted","Data":"1b5633b3bf9e9dfcb7c45fd8474d1b1091d7f4360b3c283257c83cca9358b27e"} Jan 29 13:04:44 crc kubenswrapper[4852]: I0129 13:04:44.010624 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" event={"ID":"82d924c3-a1a8-425d-8ccd-f83a7053b057","Type":"ContainerStarted","Data":"8af0c1855c19db3b95e836c53d3a6f2cb1a91bee1c9886f2f5ff4546a8421a24"} Jan 29 13:04:44 crc kubenswrapper[4852]: I0129 13:04:44.044857 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" podStartSLOduration=2.214628477 podStartE2EDuration="3.044839098s" podCreationTimestamp="2026-01-29 13:04:41 +0000 UTC" firstStartedPulling="2026-01-29 13:04:41.993203793 +0000 UTC m=+8579.210534927" lastFinishedPulling="2026-01-29 13:04:42.823414374 +0000 UTC m=+8580.040745548" observedRunningTime="2026-01-29 13:04:44.036330601 +0000 UTC m=+8581.253661775" watchObservedRunningTime="2026-01-29 13:04:44.044839098 +0000 UTC m=+8581.262170232" Jan 29 13:04:52 crc kubenswrapper[4852]: I0129 13:04:52.463646 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:04:52 crc kubenswrapper[4852]: E0129 13:04:52.466209 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:05:03 crc kubenswrapper[4852]: I0129 13:05:03.474746 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:05:03 crc kubenswrapper[4852]: E0129 13:05:03.476411 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:05:16 crc kubenswrapper[4852]: I0129 13:05:16.463539 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:05:16 crc kubenswrapper[4852]: E0129 13:05:16.464563 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" 
podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:05:29 crc kubenswrapper[4852]: I0129 13:05:29.464260 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:05:29 crc kubenswrapper[4852]: E0129 13:05:29.465401 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:05:41 crc kubenswrapper[4852]: I0129 13:05:41.464176 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:05:41 crc kubenswrapper[4852]: E0129 13:05:41.465567 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:05:50 crc kubenswrapper[4852]: I0129 13:05:50.900343 4852 generic.go:334] "Generic (PLEG): container finished" podID="82d924c3-a1a8-425d-8ccd-f83a7053b057" containerID="8af0c1855c19db3b95e836c53d3a6f2cb1a91bee1c9886f2f5ff4546a8421a24" exitCode=0 Jan 29 13:05:50 crc kubenswrapper[4852]: I0129 13:05:50.900433 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" event={"ID":"82d924c3-a1a8-425d-8ccd-f83a7053b057","Type":"ContainerDied","Data":"8af0c1855c19db3b95e836c53d3a6f2cb1a91bee1c9886f2f5ff4546a8421a24"} Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.355330 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.460743 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ssh-key-openstack-cell1\") pod \"82d924c3-a1a8-425d-8ccd-f83a7053b057\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.460827 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95cth\" (UniqueName: \"kubernetes.io/projected/82d924c3-a1a8-425d-8ccd-f83a7053b057-kube-api-access-95cth\") pod \"82d924c3-a1a8-425d-8ccd-f83a7053b057\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.460848 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-combined-ca-bundle\") pod \"82d924c3-a1a8-425d-8ccd-f83a7053b057\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.460900 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ceph\") pod \"82d924c3-a1a8-425d-8ccd-f83a7053b057\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.460939 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-inventory\") pod \"82d924c3-a1a8-425d-8ccd-f83a7053b057\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.460991 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-agent-neutron-config-0\") pod \"82d924c3-a1a8-425d-8ccd-f83a7053b057\" (UID: \"82d924c3-a1a8-425d-8ccd-f83a7053b057\") " Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.466697 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ceph" (OuterVolumeSpecName: "ceph") pod "82d924c3-a1a8-425d-8ccd-f83a7053b057" (UID: "82d924c3-a1a8-425d-8ccd-f83a7053b057"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.468774 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "82d924c3-a1a8-425d-8ccd-f83a7053b057" (UID: "82d924c3-a1a8-425d-8ccd-f83a7053b057"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.477952 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82d924c3-a1a8-425d-8ccd-f83a7053b057-kube-api-access-95cth" (OuterVolumeSpecName: "kube-api-access-95cth") pod "82d924c3-a1a8-425d-8ccd-f83a7053b057" (UID: "82d924c3-a1a8-425d-8ccd-f83a7053b057"). 
InnerVolumeSpecName "kube-api-access-95cth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.494163 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "82d924c3-a1a8-425d-8ccd-f83a7053b057" (UID: "82d924c3-a1a8-425d-8ccd-f83a7053b057"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.496705 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-inventory" (OuterVolumeSpecName: "inventory") pod "82d924c3-a1a8-425d-8ccd-f83a7053b057" (UID: "82d924c3-a1a8-425d-8ccd-f83a7053b057"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.513571 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "82d924c3-a1a8-425d-8ccd-f83a7053b057" (UID: "82d924c3-a1a8-425d-8ccd-f83a7053b057"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.566487 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.566517 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95cth\" (UniqueName: \"kubernetes.io/projected/82d924c3-a1a8-425d-8ccd-f83a7053b057-kube-api-access-95cth\") on node \"crc\" DevicePath \"\"" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.566527 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.566538 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.566555 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.566565 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/82d924c3-a1a8-425d-8ccd-f83a7053b057-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.921063 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" event={"ID":"82d924c3-a1a8-425d-8ccd-f83a7053b057","Type":"ContainerDied","Data":"1b5633b3bf9e9dfcb7c45fd8474d1b1091d7f4360b3c283257c83cca9358b27e"} Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.921113 
4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b5633b3bf9e9dfcb7c45fd8474d1b1091d7f4360b3c283257c83cca9358b27e" Jan 29 13:05:52 crc kubenswrapper[4852]: I0129 13:05:52.921177 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-rpffn" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.046109 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z"] Jan 29 13:05:53 crc kubenswrapper[4852]: E0129 13:05:53.046559 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82d924c3-a1a8-425d-8ccd-f83a7053b057" containerName="neutron-sriov-openstack-openstack-cell1" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.046579 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="82d924c3-a1a8-425d-8ccd-f83a7053b057" containerName="neutron-sriov-openstack-openstack-cell1" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.046851 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="82d924c3-a1a8-425d-8ccd-f83a7053b057" containerName="neutron-sriov-openstack-openstack-cell1" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.047704 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.050277 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.050696 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.050882 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.050978 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.051017 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.059523 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z"] Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.180475 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.180650 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dqzg\" (UniqueName: \"kubernetes.io/projected/c0571917-6ca8-4aa5-b046-f20ff3909490-kube-api-access-7dqzg\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.180775 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.180800 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ssh-key-openstack-cell1\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.180824 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.180882 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.286017 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.286187 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dqzg\" (UniqueName: \"kubernetes.io/projected/c0571917-6ca8-4aa5-b046-f20ff3909490-kube-api-access-7dqzg\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.286332 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.286379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ssh-key-openstack-cell1\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.286412 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.286485 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.297393 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.315091 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.322495 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ssh-key-openstack-cell1\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.327141 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.332177 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.340236 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dqzg\" (UniqueName: \"kubernetes.io/projected/c0571917-6ca8-4aa5-b046-f20ff3909490-kube-api-access-7dqzg\") pod \"neutron-dhcp-openstack-openstack-cell1-bvz6z\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:53 crc kubenswrapper[4852]: I0129 13:05:53.378484 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:05:54 crc kubenswrapper[4852]: I0129 13:05:54.076398 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z"] Jan 29 13:05:54 crc kubenswrapper[4852]: W0129 13:05:54.077060 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0571917_6ca8_4aa5_b046_f20ff3909490.slice/crio-6f22b659d86ad3dadd145bd39aaaae446b1175c9a6b84920c2bb476ceb831586 WatchSource:0}: Error finding container 6f22b659d86ad3dadd145bd39aaaae446b1175c9a6b84920c2bb476ceb831586: Status 404 returned error can't find the container with id 6f22b659d86ad3dadd145bd39aaaae446b1175c9a6b84920c2bb476ceb831586 Jan 29 13:05:54 crc kubenswrapper[4852]: I0129 13:05:54.080635 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:05:54 crc kubenswrapper[4852]: I0129 13:05:54.464464 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:05:54 crc kubenswrapper[4852]: E0129 13:05:54.464981 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:05:54 crc kubenswrapper[4852]: I0129 13:05:54.952700 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" event={"ID":"c0571917-6ca8-4aa5-b046-f20ff3909490","Type":"ContainerStarted","Data":"6f22b659d86ad3dadd145bd39aaaae446b1175c9a6b84920c2bb476ceb831586"} Jan 29 13:05:55 crc kubenswrapper[4852]: I0129 13:05:55.972943 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" event={"ID":"c0571917-6ca8-4aa5-b046-f20ff3909490","Type":"ContainerStarted","Data":"83c7f53d92cedf8826a2e609f9b013554ad2c48889dd78a0ce9b4513a28aab34"} Jan 29 13:05:55 crc kubenswrapper[4852]: I0129 13:05:55.996300 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" podStartSLOduration=2.345290924 podStartE2EDuration="2.996276408s" podCreationTimestamp="2026-01-29 13:05:53 +0000 UTC" firstStartedPulling="2026-01-29 13:05:54.08035714 +0000 UTC m=+8651.297688284" lastFinishedPulling="2026-01-29 13:05:54.731342644 +0000 UTC m=+8651.948673768" observedRunningTime="2026-01-29 13:05:55.992790673 +0000 UTC m=+8653.210121817" watchObservedRunningTime="2026-01-29 13:05:55.996276408 +0000 UTC m=+8653.213607552" Jan 29 13:06:05 crc kubenswrapper[4852]: I0129 13:06:05.464069 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:06:05 crc kubenswrapper[4852]: E0129 13:06:05.465509 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:06:20 crc kubenswrapper[4852]: I0129 13:06:20.465177 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:06:20 crc kubenswrapper[4852]: E0129 13:06:20.465922 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:06:31 crc kubenswrapper[4852]: I0129 13:06:31.464346 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:06:31 crc kubenswrapper[4852]: E0129 13:06:31.465866 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:06:44 crc kubenswrapper[4852]: I0129 13:06:44.463405 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:06:44 crc kubenswrapper[4852]: E0129 13:06:44.464477 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:06:58 crc kubenswrapper[4852]: I0129 13:06:58.462973 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:06:58 crc kubenswrapper[4852]: E0129 13:06:58.463666 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:07:09 crc kubenswrapper[4852]: I0129 13:07:09.822871 4852 generic.go:334] "Generic (PLEG): container finished" podID="c0571917-6ca8-4aa5-b046-f20ff3909490" containerID="83c7f53d92cedf8826a2e609f9b013554ad2c48889dd78a0ce9b4513a28aab34" exitCode=0 Jan 29 13:07:09 crc kubenswrapper[4852]: I0129 13:07:09.822960 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" event={"ID":"c0571917-6ca8-4aa5-b046-f20ff3909490","Type":"ContainerDied","Data":"83c7f53d92cedf8826a2e609f9b013554ad2c48889dd78a0ce9b4513a28aab34"} Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.506659 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.616340 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-combined-ca-bundle\") pod \"c0571917-6ca8-4aa5-b046-f20ff3909490\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.617383 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dqzg\" (UniqueName: \"kubernetes.io/projected/c0571917-6ca8-4aa5-b046-f20ff3909490-kube-api-access-7dqzg\") pod \"c0571917-6ca8-4aa5-b046-f20ff3909490\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.617475 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-agent-neutron-config-0\") pod \"c0571917-6ca8-4aa5-b046-f20ff3909490\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.617551 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ssh-key-openstack-cell1\") pod \"c0571917-6ca8-4aa5-b046-f20ff3909490\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.617689 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ceph\") pod \"c0571917-6ca8-4aa5-b046-f20ff3909490\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.617799 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-inventory\") pod \"c0571917-6ca8-4aa5-b046-f20ff3909490\" (UID: \"c0571917-6ca8-4aa5-b046-f20ff3909490\") " Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.623187 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ceph" (OuterVolumeSpecName: "ceph") pod "c0571917-6ca8-4aa5-b046-f20ff3909490" (UID: "c0571917-6ca8-4aa5-b046-f20ff3909490"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.626410 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "c0571917-6ca8-4aa5-b046-f20ff3909490" (UID: "c0571917-6ca8-4aa5-b046-f20ff3909490"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.626878 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0571917-6ca8-4aa5-b046-f20ff3909490-kube-api-access-7dqzg" (OuterVolumeSpecName: "kube-api-access-7dqzg") pod "c0571917-6ca8-4aa5-b046-f20ff3909490" (UID: "c0571917-6ca8-4aa5-b046-f20ff3909490"). 
InnerVolumeSpecName "kube-api-access-7dqzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.655703 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "c0571917-6ca8-4aa5-b046-f20ff3909490" (UID: "c0571917-6ca8-4aa5-b046-f20ff3909490"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.670516 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "c0571917-6ca8-4aa5-b046-f20ff3909490" (UID: "c0571917-6ca8-4aa5-b046-f20ff3909490"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.679856 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-inventory" (OuterVolumeSpecName: "inventory") pod "c0571917-6ca8-4aa5-b046-f20ff3909490" (UID: "c0571917-6ca8-4aa5-b046-f20ff3909490"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.720289 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.720328 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.720340 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.720356 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.720370 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dqzg\" (UniqueName: \"kubernetes.io/projected/c0571917-6ca8-4aa5-b046-f20ff3909490-kube-api-access-7dqzg\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.720383 4852 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c0571917-6ca8-4aa5-b046-f20ff3909490-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.849045 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" event={"ID":"c0571917-6ca8-4aa5-b046-f20ff3909490","Type":"ContainerDied","Data":"6f22b659d86ad3dadd145bd39aaaae446b1175c9a6b84920c2bb476ceb831586"} Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.849341 4852 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f22b659d86ad3dadd145bd39aaaae446b1175c9a6b84920c2bb476ceb831586" Jan 29 13:07:11 crc kubenswrapper[4852]: I0129 13:07:11.849096 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-bvz6z" Jan 29 13:07:13 crc kubenswrapper[4852]: I0129 13:07:13.471898 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:07:13 crc kubenswrapper[4852]: I0129 13:07:13.873798 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"1356af181d318e1b32512cd5ec8240ba40dc76d2162f9efebacefb4290325827"} Jan 29 13:07:15 crc kubenswrapper[4852]: I0129 13:07:15.983669 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p7z5r"] Jan 29 13:07:15 crc kubenswrapper[4852]: E0129 13:07:15.985523 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0571917-6ca8-4aa5-b046-f20ff3909490" containerName="neutron-dhcp-openstack-openstack-cell1" Jan 29 13:07:15 crc kubenswrapper[4852]: I0129 13:07:15.985544 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0571917-6ca8-4aa5-b046-f20ff3909490" containerName="neutron-dhcp-openstack-openstack-cell1" Jan 29 13:07:15 crc kubenswrapper[4852]: I0129 13:07:15.985935 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0571917-6ca8-4aa5-b046-f20ff3909490" containerName="neutron-dhcp-openstack-openstack-cell1" Jan 29 13:07:15 crc kubenswrapper[4852]: I0129 13:07:15.989490 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.014147 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p7z5r"] Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.116323 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-catalog-content\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.116363 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-utilities\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.116390 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4jn4\" (UniqueName: \"kubernetes.io/projected/c4d3d3fd-3675-4564-9421-5512f6d845c9-kube-api-access-n4jn4\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.218390 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-catalog-content\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.218713 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-utilities\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.218736 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4jn4\" (UniqueName: \"kubernetes.io/projected/c4d3d3fd-3675-4564-9421-5512f6d845c9-kube-api-access-n4jn4\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.218996 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-catalog-content\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.219132 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-utilities\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.239420 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-n4jn4\" (UniqueName: \"kubernetes.io/projected/c4d3d3fd-3675-4564-9421-5512f6d845c9-kube-api-access-n4jn4\") pod \"community-operators-p7z5r\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.318859 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:16 crc kubenswrapper[4852]: I0129 13:07:16.905905 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p7z5r"] Jan 29 13:07:16 crc kubenswrapper[4852]: W0129 13:07:16.907025 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4d3d3fd_3675_4564_9421_5512f6d845c9.slice/crio-2410eb32047e941559b0abdbe0ba55ffa1343ec3fa01018704694589cfd0a35f WatchSource:0}: Error finding container 2410eb32047e941559b0abdbe0ba55ffa1343ec3fa01018704694589cfd0a35f: Status 404 returned error can't find the container with id 2410eb32047e941559b0abdbe0ba55ffa1343ec3fa01018704694589cfd0a35f Jan 29 13:07:17 crc kubenswrapper[4852]: I0129 13:07:17.911533 4852 generic.go:334] "Generic (PLEG): container finished" podID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerID="012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33" exitCode=0 Jan 29 13:07:17 crc kubenswrapper[4852]: I0129 13:07:17.911617 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerDied","Data":"012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33"} Jan 29 13:07:17 crc kubenswrapper[4852]: I0129 13:07:17.911987 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerStarted","Data":"2410eb32047e941559b0abdbe0ba55ffa1343ec3fa01018704694589cfd0a35f"} Jan 29 13:07:18 crc kubenswrapper[4852]: I0129 13:07:18.921465 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerStarted","Data":"6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311"} Jan 29 13:07:19 crc kubenswrapper[4852]: I0129 13:07:19.932877 4852 generic.go:334] "Generic (PLEG): container finished" podID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerID="6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311" exitCode=0 Jan 29 13:07:19 crc kubenswrapper[4852]: I0129 13:07:19.932955 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerDied","Data":"6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311"} Jan 29 13:07:20 crc kubenswrapper[4852]: I0129 13:07:20.948492 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerStarted","Data":"571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a"} Jan 29 13:07:20 crc kubenswrapper[4852]: I0129 13:07:20.976734 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p7z5r" 
podStartSLOduration=3.432144576 podStartE2EDuration="5.976716252s" podCreationTimestamp="2026-01-29 13:07:15 +0000 UTC" firstStartedPulling="2026-01-29 13:07:17.914803569 +0000 UTC m=+8735.132134703" lastFinishedPulling="2026-01-29 13:07:20.459375245 +0000 UTC m=+8737.676706379" observedRunningTime="2026-01-29 13:07:20.972355906 +0000 UTC m=+8738.189687080" watchObservedRunningTime="2026-01-29 13:07:20.976716252 +0000 UTC m=+8738.194047386" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.366303 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xc2rm"] Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.372120 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.383854 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xc2rm"] Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.497502 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xklbq\" (UniqueName: \"kubernetes.io/projected/4684f150-6b82-4217-8d50-34d28b356821-kube-api-access-xklbq\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.497602 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-utilities\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.497675 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-catalog-content\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.600114 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xklbq\" (UniqueName: \"kubernetes.io/projected/4684f150-6b82-4217-8d50-34d28b356821-kube-api-access-xklbq\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.600247 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-utilities\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.600335 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-catalog-content\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.601089 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-utilities\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.601313 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-catalog-content\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.627279 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xklbq\" (UniqueName: \"kubernetes.io/projected/4684f150-6b82-4217-8d50-34d28b356821-kube-api-access-xklbq\") pod \"redhat-operators-xc2rm\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:23 crc kubenswrapper[4852]: I0129 13:07:23.704426 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:24 crc kubenswrapper[4852]: I0129 13:07:24.790485 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xc2rm"] Jan 29 13:07:24 crc kubenswrapper[4852]: W0129 13:07:24.806751 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4684f150_6b82_4217_8d50_34d28b356821.slice/crio-85c281650a86f105446bf45c9ef0feac6054a6e8740f09c6aef4ff97511f2a58 WatchSource:0}: Error finding container 85c281650a86f105446bf45c9ef0feac6054a6e8740f09c6aef4ff97511f2a58: Status 404 returned error can't find the container with id 85c281650a86f105446bf45c9ef0feac6054a6e8740f09c6aef4ff97511f2a58 Jan 29 13:07:24 crc kubenswrapper[4852]: I0129 13:07:24.999314 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerStarted","Data":"85c281650a86f105446bf45c9ef0feac6054a6e8740f09c6aef4ff97511f2a58"} Jan 29 13:07:26 crc kubenswrapper[4852]: I0129 13:07:26.014510 4852 generic.go:334] "Generic (PLEG): container finished" podID="4684f150-6b82-4217-8d50-34d28b356821" containerID="27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883" exitCode=0 Jan 29 13:07:26 crc kubenswrapper[4852]: I0129 13:07:26.014701 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerDied","Data":"27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883"} Jan 29 13:07:26 crc kubenswrapper[4852]: I0129 13:07:26.319973 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:26 crc kubenswrapper[4852]: I0129 13:07:26.320411 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:26 crc kubenswrapper[4852]: I0129 13:07:26.370357 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:27 crc kubenswrapper[4852]: I0129 13:07:27.029857 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" 
event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerStarted","Data":"c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c"} Jan 29 13:07:27 crc kubenswrapper[4852]: I0129 13:07:27.096284 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:28 crc kubenswrapper[4852]: I0129 13:07:28.744751 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p7z5r"] Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.056613 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-p7z5r" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="registry-server" containerID="cri-o://571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a" gracePeriod=2 Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.606733 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.759861 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-catalog-content\") pod \"c4d3d3fd-3675-4564-9421-5512f6d845c9\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.759962 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4jn4\" (UniqueName: \"kubernetes.io/projected/c4d3d3fd-3675-4564-9421-5512f6d845c9-kube-api-access-n4jn4\") pod \"c4d3d3fd-3675-4564-9421-5512f6d845c9\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.760083 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-utilities\") pod \"c4d3d3fd-3675-4564-9421-5512f6d845c9\" (UID: \"c4d3d3fd-3675-4564-9421-5512f6d845c9\") " Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.761346 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-utilities" (OuterVolumeSpecName: "utilities") pod "c4d3d3fd-3675-4564-9421-5512f6d845c9" (UID: "c4d3d3fd-3675-4564-9421-5512f6d845c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.768741 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4d3d3fd-3675-4564-9421-5512f6d845c9-kube-api-access-n4jn4" (OuterVolumeSpecName: "kube-api-access-n4jn4") pod "c4d3d3fd-3675-4564-9421-5512f6d845c9" (UID: "c4d3d3fd-3675-4564-9421-5512f6d845c9"). InnerVolumeSpecName "kube-api-access-n4jn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.824278 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c4d3d3fd-3675-4564-9421-5512f6d845c9" (UID: "c4d3d3fd-3675-4564-9421-5512f6d845c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.863135 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.863169 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4jn4\" (UniqueName: \"kubernetes.io/projected/c4d3d3fd-3675-4564-9421-5512f6d845c9-kube-api-access-n4jn4\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:29 crc kubenswrapper[4852]: I0129 13:07:29.863183 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4d3d3fd-3675-4564-9421-5512f6d845c9-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.071301 4852 generic.go:334] "Generic (PLEG): container finished" podID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerID="571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a" exitCode=0 Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.071348 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerDied","Data":"571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a"} Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.071379 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p7z5r" event={"ID":"c4d3d3fd-3675-4564-9421-5512f6d845c9","Type":"ContainerDied","Data":"2410eb32047e941559b0abdbe0ba55ffa1343ec3fa01018704694589cfd0a35f"} Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.071399 4852 scope.go:117] "RemoveContainer" containerID="571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.071557 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p7z5r" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.110225 4852 scope.go:117] "RemoveContainer" containerID="6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.116245 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p7z5r"] Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.126674 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-p7z5r"] Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.147839 4852 scope.go:117] "RemoveContainer" containerID="012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.193433 4852 scope.go:117] "RemoveContainer" containerID="571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a" Jan 29 13:07:30 crc kubenswrapper[4852]: E0129 13:07:30.193934 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a\": container with ID starting with 571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a not found: ID does not exist" containerID="571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.194047 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a"} err="failed to get container status \"571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a\": rpc error: code = NotFound desc = could not find container \"571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a\": container with ID starting with 571e24f13c0d04728843f9d174443cc6b9824c13b89b93b10f2bc0bd73f6983a not found: ID does not exist" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.194143 4852 scope.go:117] "RemoveContainer" containerID="6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311" Jan 29 13:07:30 crc kubenswrapper[4852]: E0129 13:07:30.194537 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311\": container with ID starting with 6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311 not found: ID does not exist" containerID="6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.194597 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311"} err="failed to get container status \"6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311\": rpc error: code = NotFound desc = could not find container \"6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311\": container with ID starting with 6fdb1f93dae6068323cd1b78729c64194a51811766a4bf936ada72be71ea1311 not found: ID does not exist" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.194630 4852 scope.go:117] "RemoveContainer" containerID="012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33" Jan 29 13:07:30 crc kubenswrapper[4852]: E0129 13:07:30.194979 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33\": container with ID starting with 012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33 not found: ID does not exist" containerID="012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33" Jan 29 13:07:30 crc kubenswrapper[4852]: I0129 13:07:30.195078 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33"} err="failed to get container status \"012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33\": rpc error: code = NotFound desc = could not find container \"012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33\": container with ID starting with 012b1b883650c40c1fcb1acc5b0abd9201878ddde62b663640db79d6fd250e33 not found: ID does not exist" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.344699 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8r86w"] Jan 29 13:07:31 crc kubenswrapper[4852]: E0129 13:07:31.345638 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="extract-utilities" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.345659 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="extract-utilities" Jan 29 13:07:31 crc kubenswrapper[4852]: E0129 13:07:31.345685 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="extract-content" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.345694 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="extract-content" Jan 29 13:07:31 crc kubenswrapper[4852]: E0129 13:07:31.345715 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="registry-server" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.345725 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="registry-server" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.345990 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" containerName="registry-server" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.347961 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.369325 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8r86w"] Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.403256 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-catalog-content\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.403517 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq249\" (UniqueName: \"kubernetes.io/projected/9c8d1249-9234-4e7e-8046-55bac172b611-kube-api-access-rq249\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.403709 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-utilities\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.475646 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4d3d3fd-3675-4564-9421-5512f6d845c9" path="/var/lib/kubelet/pods/c4d3d3fd-3675-4564-9421-5512f6d845c9/volumes" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.507510 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq249\" (UniqueName: \"kubernetes.io/projected/9c8d1249-9234-4e7e-8046-55bac172b611-kube-api-access-rq249\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.507767 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-utilities\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.507916 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-catalog-content\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.508562 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-catalog-content\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.509268 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-utilities\") 
pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.550147 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq249\" (UniqueName: \"kubernetes.io/projected/9c8d1249-9234-4e7e-8046-55bac172b611-kube-api-access-rq249\") pod \"redhat-marketplace-8r86w\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:31 crc kubenswrapper[4852]: I0129 13:07:31.676467 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:32 crc kubenswrapper[4852]: I0129 13:07:32.235518 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8r86w"] Jan 29 13:07:32 crc kubenswrapper[4852]: W0129 13:07:32.244277 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c8d1249_9234_4e7e_8046_55bac172b611.slice/crio-2c2c5b6a8bb7e1042a086b396d73c29400a8969a4e0a749b39909c22e8ffe240 WatchSource:0}: Error finding container 2c2c5b6a8bb7e1042a086b396d73c29400a8969a4e0a749b39909c22e8ffe240: Status 404 returned error can't find the container with id 2c2c5b6a8bb7e1042a086b396d73c29400a8969a4e0a749b39909c22e8ffe240 Jan 29 13:07:33 crc kubenswrapper[4852]: I0129 13:07:33.108381 4852 generic.go:334] "Generic (PLEG): container finished" podID="9c8d1249-9234-4e7e-8046-55bac172b611" containerID="84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8" exitCode=0 Jan 29 13:07:33 crc kubenswrapper[4852]: I0129 13:07:33.108432 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerDied","Data":"84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8"} Jan 29 13:07:33 crc kubenswrapper[4852]: I0129 13:07:33.108792 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerStarted","Data":"2c2c5b6a8bb7e1042a086b396d73c29400a8969a4e0a749b39909c22e8ffe240"} Jan 29 13:07:34 crc kubenswrapper[4852]: I0129 13:07:34.118882 4852 generic.go:334] "Generic (PLEG): container finished" podID="4684f150-6b82-4217-8d50-34d28b356821" containerID="c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c" exitCode=0 Jan 29 13:07:34 crc kubenswrapper[4852]: I0129 13:07:34.119070 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerDied","Data":"c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c"} Jan 29 13:07:35 crc kubenswrapper[4852]: I0129 13:07:35.143463 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerStarted","Data":"d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb"} Jan 29 13:07:35 crc kubenswrapper[4852]: I0129 13:07:35.149725 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerStarted","Data":"1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb"} 
Jan 29 13:07:35 crc kubenswrapper[4852]: I0129 13:07:35.177440 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xc2rm" podStartSLOduration=3.648440963 podStartE2EDuration="12.177418628s" podCreationTimestamp="2026-01-29 13:07:23 +0000 UTC" firstStartedPulling="2026-01-29 13:07:26.017272252 +0000 UTC m=+8743.234603376" lastFinishedPulling="2026-01-29 13:07:34.546249907 +0000 UTC m=+8751.763581041" observedRunningTime="2026-01-29 13:07:35.167141518 +0000 UTC m=+8752.384472662" watchObservedRunningTime="2026-01-29 13:07:35.177418628 +0000 UTC m=+8752.394749762" Jan 29 13:07:36 crc kubenswrapper[4852]: I0129 13:07:36.182530 4852 generic.go:334] "Generic (PLEG): container finished" podID="9c8d1249-9234-4e7e-8046-55bac172b611" containerID="1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb" exitCode=0 Jan 29 13:07:36 crc kubenswrapper[4852]: I0129 13:07:36.182842 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerDied","Data":"1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb"} Jan 29 13:07:37 crc kubenswrapper[4852]: I0129 13:07:37.196977 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerStarted","Data":"29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413"} Jan 29 13:07:37 crc kubenswrapper[4852]: I0129 13:07:37.235419 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8r86w" podStartSLOduration=2.673417498 podStartE2EDuration="6.235400268s" podCreationTimestamp="2026-01-29 13:07:31 +0000 UTC" firstStartedPulling="2026-01-29 13:07:33.110715806 +0000 UTC m=+8750.328046940" lastFinishedPulling="2026-01-29 13:07:36.672698566 +0000 UTC m=+8753.890029710" observedRunningTime="2026-01-29 13:07:37.21865803 +0000 UTC m=+8754.435989174" watchObservedRunningTime="2026-01-29 13:07:37.235400268 +0000 UTC m=+8754.452731392" Jan 29 13:07:39 crc kubenswrapper[4852]: I0129 13:07:39.216409 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:07:39 crc kubenswrapper[4852]: I0129 13:07:39.216721 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="66a97aee-41b4-454c-a584-9f0195bce766" containerName="nova-cell0-conductor-conductor" containerID="cri-o://020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa" gracePeriod=30 Jan 29 13:07:39 crc kubenswrapper[4852]: I0129 13:07:39.260539 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:07:39 crc kubenswrapper[4852]: I0129 13:07:39.261425 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="e607ecc3-25cf-4681-85cc-26f1634e8cdb" containerName="nova-cell1-conductor-conductor" containerID="cri-o://02fd37a25c66dbf7adaf56353b54e230c1f14f0e00caef2447ef34c4fd1f6452" gracePeriod=30 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.118705 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.118921 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" 
podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerName="nova-scheduler-scheduler" containerID="cri-o://ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" gracePeriod=30 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.134051 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.134337 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-log" containerID="cri-o://617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804" gracePeriod=30 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.134402 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-api" containerID="cri-o://4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a" gracePeriod=30 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.146123 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.146364 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-log" containerID="cri-o://9b4f777a246caab0b8394d8c852f616f25a1e6a20801f90e5a01b52249926ed4" gracePeriod=30 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.146417 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-metadata" containerID="cri-o://d0e9c5f01424616b0bfadafd00ea22e49f5c98bb771f69a655780f61ba81c303" gracePeriod=30 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.266349 4852 generic.go:334] "Generic (PLEG): container finished" podID="e607ecc3-25cf-4681-85cc-26f1634e8cdb" containerID="02fd37a25c66dbf7adaf56353b54e230c1f14f0e00caef2447ef34c4fd1f6452" exitCode=0 Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.266558 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e607ecc3-25cf-4681-85cc-26f1634e8cdb","Type":"ContainerDied","Data":"02fd37a25c66dbf7adaf56353b54e230c1f14f0e00caef2447ef34c4fd1f6452"} Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.662878 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.708518 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-config-data\") pod \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.708854 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kgpw\" (UniqueName: \"kubernetes.io/projected/e607ecc3-25cf-4681-85cc-26f1634e8cdb-kube-api-access-4kgpw\") pod \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.709022 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-combined-ca-bundle\") pod \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\" (UID: \"e607ecc3-25cf-4681-85cc-26f1634e8cdb\") " Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.714076 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e607ecc3-25cf-4681-85cc-26f1634e8cdb-kube-api-access-4kgpw" (OuterVolumeSpecName: "kube-api-access-4kgpw") pod "e607ecc3-25cf-4681-85cc-26f1634e8cdb" (UID: "e607ecc3-25cf-4681-85cc-26f1634e8cdb"). InnerVolumeSpecName "kube-api-access-4kgpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.731065 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kgpw\" (UniqueName: \"kubernetes.io/projected/e607ecc3-25cf-4681-85cc-26f1634e8cdb-kube-api-access-4kgpw\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.745357 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-config-data" (OuterVolumeSpecName: "config-data") pod "e607ecc3-25cf-4681-85cc-26f1634e8cdb" (UID: "e607ecc3-25cf-4681-85cc-26f1634e8cdb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.753286 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e607ecc3-25cf-4681-85cc-26f1634e8cdb" (UID: "e607ecc3-25cf-4681-85cc-26f1634e8cdb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.832439 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:40 crc kubenswrapper[4852]: I0129 13:07:40.832473 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e607ecc3-25cf-4681-85cc-26f1634e8cdb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.284362 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerID="617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804" exitCode=143 Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.284456 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b915d83-fdef-4d3a-8079-26b0cd0a956c","Type":"ContainerDied","Data":"617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804"} Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.286114 4852 generic.go:334] "Generic (PLEG): container finished" podID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerID="9b4f777a246caab0b8394d8c852f616f25a1e6a20801f90e5a01b52249926ed4" exitCode=143 Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.286178 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7adc97e8-4d27-486c-820f-4b49d6fd095b","Type":"ContainerDied","Data":"9b4f777a246caab0b8394d8c852f616f25a1e6a20801f90e5a01b52249926ed4"} Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.288006 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e607ecc3-25cf-4681-85cc-26f1634e8cdb","Type":"ContainerDied","Data":"7e09026c9dd62eea31a90beb318a5b0bf725149139eecf61110b83d299965584"} Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.288046 4852 scope.go:117] "RemoveContainer" containerID="02fd37a25c66dbf7adaf56353b54e230c1f14f0e00caef2447ef34c4fd1f6452" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.288209 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.340320 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.354751 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.376319 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:07:41 crc kubenswrapper[4852]: E0129 13:07:41.376864 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e607ecc3-25cf-4681-85cc-26f1634e8cdb" containerName="nova-cell1-conductor-conductor" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.376887 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e607ecc3-25cf-4681-85cc-26f1634e8cdb" containerName="nova-cell1-conductor-conductor" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.377133 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e607ecc3-25cf-4681-85cc-26f1634e8cdb" containerName="nova-cell1-conductor-conductor" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.377936 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.380269 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.389370 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.444684 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a67a7e-1743-4610-9fe3-cb6a394c70a2-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.444906 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a67a7e-1743-4610-9fe3-cb6a394c70a2-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.445036 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcps6\" (UniqueName: \"kubernetes.io/projected/36a67a7e-1743-4610-9fe3-cb6a394c70a2-kube-api-access-mcps6\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.475212 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e607ecc3-25cf-4681-85cc-26f1634e8cdb" path="/var/lib/kubelet/pods/e607ecc3-25cf-4681-85cc-26f1634e8cdb/volumes" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.547623 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a67a7e-1743-4610-9fe3-cb6a394c70a2-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: 
I0129 13:07:41.547929 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a67a7e-1743-4610-9fe3-cb6a394c70a2-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.548080 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcps6\" (UniqueName: \"kubernetes.io/projected/36a67a7e-1743-4610-9fe3-cb6a394c70a2-kube-api-access-mcps6\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.554887 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a67a7e-1743-4610-9fe3-cb6a394c70a2-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.556279 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a67a7e-1743-4610-9fe3-cb6a394c70a2-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.572225 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcps6\" (UniqueName: \"kubernetes.io/projected/36a67a7e-1743-4610-9fe3-cb6a394c70a2-kube-api-access-mcps6\") pod \"nova-cell1-conductor-0\" (UID: \"36a67a7e-1743-4610-9fe3-cb6a394c70a2\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.677627 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.677686 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:41 crc kubenswrapper[4852]: I0129 13:07:41.706535 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:42 crc kubenswrapper[4852]: E0129 13:07:42.145915 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:07:42 crc kubenswrapper[4852]: E0129 13:07:42.157736 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:07:42 crc kubenswrapper[4852]: E0129 13:07:42.180036 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:07:42 crc kubenswrapper[4852]: E0129 13:07:42.180117 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerName="nova-scheduler-scheduler" Jan 29 13:07:42 crc kubenswrapper[4852]: I0129 13:07:42.260130 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:07:42 crc kubenswrapper[4852]: I0129 13:07:42.333765 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"36a67a7e-1743-4610-9fe3-cb6a394c70a2","Type":"ContainerStarted","Data":"5d836756f53cf7a33efa59346123b741c2786c0a36d346cc1e9a2880ef7ae518"} Jan 29 13:07:42 crc kubenswrapper[4852]: I0129 13:07:42.727547 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-8r86w" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="registry-server" probeResult="failure" output=< Jan 29 13:07:42 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 13:07:42 crc kubenswrapper[4852]: > Jan 29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.345847 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"36a67a7e-1743-4610-9fe3-cb6a394c70a2","Type":"ContainerStarted","Data":"85b9d5b8ca327aeb1aab274c209872e1b3000e0d843b5ab2d574d2dd1f4ed135"} Jan 29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.347410 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.369957 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.369935215 podStartE2EDuration="2.369935215s" podCreationTimestamp="2026-01-29 13:07:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:07:43.362174767 +0000 UTC m=+8760.579505901" watchObservedRunningTime="2026-01-29 13:07:43.369935215 +0000 UTC m=+8760.587266339" Jan 
29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.610508 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": read tcp 10.217.0.2:50306->10.217.1.83:8775: read: connection reset by peer" Jan 29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.610541 4852 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.83:8775/\": read tcp 10.217.0.2:50318->10.217.1.83:8775: read: connection reset by peer" Jan 29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.705541 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:43 crc kubenswrapper[4852]: I0129 13:07:43.705645 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.194990 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.197269 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.199000 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.199050 4852 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="66a97aee-41b4-454c-a584-9f0195bce766" containerName="nova-cell0-conductor-conductor" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.358621 4852 generic.go:334] "Generic (PLEG): container finished" podID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerID="d0e9c5f01424616b0bfadafd00ea22e49f5c98bb771f69a655780f61ba81c303" exitCode=0 Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.358681 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7adc97e8-4d27-486c-820f-4b49d6fd095b","Type":"ContainerDied","Data":"d0e9c5f01424616b0bfadafd00ea22e49f5c98bb771f69a655780f61ba81c303"} Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.358757 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7adc97e8-4d27-486c-820f-4b49d6fd095b","Type":"ContainerDied","Data":"e2f1d19d2f18cbbdb942f3fd9f18c75322dcd5c3487f9e628e05a6a54559c270"} Jan 29 
13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.358769 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2f1d19d2f18cbbdb942f3fd9f18c75322dcd5c3487f9e628e05a6a54559c270" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.360925 4852 generic.go:334] "Generic (PLEG): container finished" podID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerID="4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a" exitCode=0 Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.360942 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.360956 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b915d83-fdef-4d3a-8079-26b0cd0a956c","Type":"ContainerDied","Data":"4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a"} Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.361161 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b915d83-fdef-4d3a-8079-26b0cd0a956c","Type":"ContainerDied","Data":"03b10d7a945657830360dc536f019bfaf055245a6d7e3f1542bdde06ccd21d43"} Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.361185 4852 scope.go:117] "RemoveContainer" containerID="4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.368438 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.390552 4852 scope.go:117] "RemoveContainer" containerID="617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.414653 4852 scope.go:117] "RemoveContainer" containerID="4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a" Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.415628 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a\": container with ID starting with 4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a not found: ID does not exist" containerID="4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.415669 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a"} err="failed to get container status \"4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a\": rpc error: code = NotFound desc = could not find container \"4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a\": container with ID starting with 4dc05278d306b828213c904ebfd13b1a7407397892c5e899fc9e19e6ee4d226a not found: ID does not exist" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.415695 4852 scope.go:117] "RemoveContainer" containerID="617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804" Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.417767 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804\": container with ID starting with 617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804 not found: ID does 
not exist" containerID="617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.417793 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804"} err="failed to get container status \"617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804\": rpc error: code = NotFound desc = could not find container \"617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804\": container with ID starting with 617a62d8ef1cfcef5db15637a9ff106b6319b868575492b96a1c492a458fc804 not found: ID does not exist" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424298 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-combined-ca-bundle\") pod \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424396 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b915d83-fdef-4d3a-8079-26b0cd0a956c-logs\") pod \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424477 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-config-data\") pod \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424536 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5z4g\" (UniqueName: \"kubernetes.io/projected/7adc97e8-4d27-486c-820f-4b49d6fd095b-kube-api-access-w5z4g\") pod \"7adc97e8-4d27-486c-820f-4b49d6fd095b\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424574 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-combined-ca-bundle\") pod \"7adc97e8-4d27-486c-820f-4b49d6fd095b\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424634 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb99w\" (UniqueName: \"kubernetes.io/projected/1b915d83-fdef-4d3a-8079-26b0cd0a956c-kube-api-access-rb99w\") pod \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\" (UID: \"1b915d83-fdef-4d3a-8079-26b0cd0a956c\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424669 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-config-data\") pod \"7adc97e8-4d27-486c-820f-4b49d6fd095b\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.424755 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7adc97e8-4d27-486c-820f-4b49d6fd095b-logs\") pod \"7adc97e8-4d27-486c-820f-4b49d6fd095b\" (UID: \"7adc97e8-4d27-486c-820f-4b49d6fd095b\") " Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 
13:07:44.425168 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b915d83-fdef-4d3a-8079-26b0cd0a956c-logs" (OuterVolumeSpecName: "logs") pod "1b915d83-fdef-4d3a-8079-26b0cd0a956c" (UID: "1b915d83-fdef-4d3a-8079-26b0cd0a956c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.425490 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b915d83-fdef-4d3a-8079-26b0cd0a956c-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.432827 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b915d83-fdef-4d3a-8079-26b0cd0a956c-kube-api-access-rb99w" (OuterVolumeSpecName: "kube-api-access-rb99w") pod "1b915d83-fdef-4d3a-8079-26b0cd0a956c" (UID: "1b915d83-fdef-4d3a-8079-26b0cd0a956c"). InnerVolumeSpecName "kube-api-access-rb99w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.435244 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7adc97e8-4d27-486c-820f-4b49d6fd095b-kube-api-access-w5z4g" (OuterVolumeSpecName: "kube-api-access-w5z4g") pod "7adc97e8-4d27-486c-820f-4b49d6fd095b" (UID: "7adc97e8-4d27-486c-820f-4b49d6fd095b"). InnerVolumeSpecName "kube-api-access-w5z4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.437148 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7adc97e8-4d27-486c-820f-4b49d6fd095b-logs" (OuterVolumeSpecName: "logs") pod "7adc97e8-4d27-486c-820f-4b49d6fd095b" (UID: "7adc97e8-4d27-486c-820f-4b49d6fd095b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.478940 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-config-data" (OuterVolumeSpecName: "config-data") pod "1b915d83-fdef-4d3a-8079-26b0cd0a956c" (UID: "1b915d83-fdef-4d3a-8079-26b0cd0a956c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.497985 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b915d83-fdef-4d3a-8079-26b0cd0a956c" (UID: "1b915d83-fdef-4d3a-8079-26b0cd0a956c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.508168 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-config-data" (OuterVolumeSpecName: "config-data") pod "7adc97e8-4d27-486c-820f-4b49d6fd095b" (UID: "7adc97e8-4d27-486c-820f-4b49d6fd095b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.527267 4852 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7adc97e8-4d27-486c-820f-4b49d6fd095b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.527298 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.527307 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b915d83-fdef-4d3a-8079-26b0cd0a956c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.527315 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5z4g\" (UniqueName: \"kubernetes.io/projected/7adc97e8-4d27-486c-820f-4b49d6fd095b-kube-api-access-w5z4g\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.527325 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb99w\" (UniqueName: \"kubernetes.io/projected/1b915d83-fdef-4d3a-8079-26b0cd0a956c-kube-api-access-rb99w\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.527332 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.531738 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7adc97e8-4d27-486c-820f-4b49d6fd095b" (UID: "7adc97e8-4d27-486c-820f-4b49d6fd095b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.629238 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7adc97e8-4d27-486c-820f-4b49d6fd095b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.768189 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xc2rm" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="registry-server" probeResult="failure" output=< Jan 29 13:07:44 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 13:07:44 crc kubenswrapper[4852]: > Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.876488 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd"] Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.877044 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-api" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877062 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-api" Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.877070 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-log" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877076 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-log" Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.877086 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-log" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877093 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-log" Jan 29 13:07:44 crc kubenswrapper[4852]: E0129 13:07:44.877122 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-metadata" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877128 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-metadata" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877324 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-metadata" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877350 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" containerName="nova-metadata-log" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877363 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-api" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.877373 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" containerName="nova-api-log" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.878146 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.881661 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.881891 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.882016 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.882185 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-vmcft" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.882298 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.882418 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.882523 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.893341 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd"] Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937187 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfs4z\" (UniqueName: \"kubernetes.io/projected/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-kube-api-access-wfs4z\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937244 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937275 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937308 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937332 4852 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937463 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937547 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937647 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937793 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937847 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:44 crc kubenswrapper[4852]: I0129 13:07:44.937951 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.039982 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: 
\"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040323 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfs4z\" (UniqueName: \"kubernetes.io/projected/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-kube-api-access-wfs4z\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040353 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040379 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040395 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040415 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040450 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040480 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " 
pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040510 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040566 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040610 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.040917 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.041454 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.044171 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.044445 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.044459 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.045509 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.055949 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.064114 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.064210 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.064264 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.072734 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfs4z\" (UniqueName: \"kubernetes.io/projected/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-kube-api-access-wfs4z\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.198529 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.374510 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.375172 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.463633 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.519881 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.533438 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.547629 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.555040 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.557733 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.559619 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.569483 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.580843 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.582914 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.585839 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.590005 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654166 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a752a8f-23da-4add-9dd1-3d765dd5440e-logs\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654215 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a752a8f-23da-4add-9dd1-3d765dd5440e-config-data\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654236 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3724deaf-3c35-42d5-8b3c-f7306b134b15-logs\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654252 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-685v9\" (UniqueName: \"kubernetes.io/projected/8a752a8f-23da-4add-9dd1-3d765dd5440e-kube-api-access-685v9\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 
13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654272 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3724deaf-3c35-42d5-8b3c-f7306b134b15-config-data\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654306 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcqp5\" (UniqueName: \"kubernetes.io/projected/3724deaf-3c35-42d5-8b3c-f7306b134b15-kube-api-access-xcqp5\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654330 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3724deaf-3c35-42d5-8b3c-f7306b134b15-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.654349 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a752a8f-23da-4add-9dd1-3d765dd5440e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.756937 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcqp5\" (UniqueName: \"kubernetes.io/projected/3724deaf-3c35-42d5-8b3c-f7306b134b15-kube-api-access-xcqp5\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757004 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3724deaf-3c35-42d5-8b3c-f7306b134b15-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757045 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a752a8f-23da-4add-9dd1-3d765dd5440e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757276 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a752a8f-23da-4add-9dd1-3d765dd5440e-logs\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757323 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a752a8f-23da-4add-9dd1-3d765dd5440e-config-data\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757352 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3724deaf-3c35-42d5-8b3c-f7306b134b15-logs\") pod 
\"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757377 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-685v9\" (UniqueName: \"kubernetes.io/projected/8a752a8f-23da-4add-9dd1-3d765dd5440e-kube-api-access-685v9\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757408 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3724deaf-3c35-42d5-8b3c-f7306b134b15-config-data\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.757906 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a752a8f-23da-4add-9dd1-3d765dd5440e-logs\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.758064 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3724deaf-3c35-42d5-8b3c-f7306b134b15-logs\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.766829 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3724deaf-3c35-42d5-8b3c-f7306b134b15-config-data\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.767083 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3724deaf-3c35-42d5-8b3c-f7306b134b15-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.767357 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a752a8f-23da-4add-9dd1-3d765dd5440e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.768040 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a752a8f-23da-4add-9dd1-3d765dd5440e-config-data\") pod \"nova-api-0\" (UID: \"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.776632 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcqp5\" (UniqueName: \"kubernetes.io/projected/3724deaf-3c35-42d5-8b3c-f7306b134b15-kube-api-access-xcqp5\") pod \"nova-metadata-0\" (UID: \"3724deaf-3c35-42d5-8b3c-f7306b134b15\") " pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.778669 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-685v9\" (UniqueName: \"kubernetes.io/projected/8a752a8f-23da-4add-9dd1-3d765dd5440e-kube-api-access-685v9\") pod \"nova-api-0\" (UID: 
\"8a752a8f-23da-4add-9dd1-3d765dd5440e\") " pod="openstack/nova-api-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.813659 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd"] Jan 29 13:07:45 crc kubenswrapper[4852]: W0129 13:07:45.818299 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a2c5425_b9b5_4030_9e85_4a935c5d0cd1.slice/crio-027fc2459705274e572faca17c054d5dacc0373cdd69180c57d77d7b84e97cd0 WatchSource:0}: Error finding container 027fc2459705274e572faca17c054d5dacc0373cdd69180c57d77d7b84e97cd0: Status 404 returned error can't find the container with id 027fc2459705274e572faca17c054d5dacc0373cdd69180c57d77d7b84e97cd0 Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.880141 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:07:45 crc kubenswrapper[4852]: I0129 13:07:45.897889 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:07:46 crc kubenswrapper[4852]: W0129 13:07:46.395766 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3724deaf_3c35_42d5_8b3c_f7306b134b15.slice/crio-4e9c9bb1a58c4692d96d8b19bed6b5677ae49a8e2fd638ebae9bfb388c87403b WatchSource:0}: Error finding container 4e9c9bb1a58c4692d96d8b19bed6b5677ae49a8e2fd638ebae9bfb388c87403b: Status 404 returned error can't find the container with id 4e9c9bb1a58c4692d96d8b19bed6b5677ae49a8e2fd638ebae9bfb388c87403b Jan 29 13:07:46 crc kubenswrapper[4852]: I0129 13:07:46.397161 4852 generic.go:334] "Generic (PLEG): container finished" podID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" exitCode=0 Jan 29 13:07:46 crc kubenswrapper[4852]: I0129 13:07:46.397248 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3f441bde-6fef-42f7-8a6d-9cd7eceb019e","Type":"ContainerDied","Data":"ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d"} Jan 29 13:07:46 crc kubenswrapper[4852]: I0129 13:07:46.400290 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" event={"ID":"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1","Type":"ContainerStarted","Data":"027fc2459705274e572faca17c054d5dacc0373cdd69180c57d77d7b84e97cd0"} Jan 29 13:07:46 crc kubenswrapper[4852]: I0129 13:07:46.403153 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:07:46 crc kubenswrapper[4852]: I0129 13:07:46.544321 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:07:47 crc kubenswrapper[4852]: E0129 13:07:47.137051 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d is running failed: container process not found" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:07:47 crc kubenswrapper[4852]: E0129 13:07:47.137848 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: 
checking if PID of ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d is running failed: container process not found" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:07:47 crc kubenswrapper[4852]: E0129 13:07:47.138230 4852 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d is running failed: container process not found" containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:07:47 crc kubenswrapper[4852]: E0129 13:07:47.138317 4852 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerName="nova-scheduler-scheduler" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.166919 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.300328 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-combined-ca-bundle\") pod \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.300453 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-config-data\") pod \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.300486 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d9rn\" (UniqueName: \"kubernetes.io/projected/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-kube-api-access-2d9rn\") pod \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\" (UID: \"3f441bde-6fef-42f7-8a6d-9cd7eceb019e\") " Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.307943 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-kube-api-access-2d9rn" (OuterVolumeSpecName: "kube-api-access-2d9rn") pod "3f441bde-6fef-42f7-8a6d-9cd7eceb019e" (UID: "3f441bde-6fef-42f7-8a6d-9cd7eceb019e"). InnerVolumeSpecName "kube-api-access-2d9rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.343120 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f441bde-6fef-42f7-8a6d-9cd7eceb019e" (UID: "3f441bde-6fef-42f7-8a6d-9cd7eceb019e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.380079 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-config-data" (OuterVolumeSpecName: "config-data") pod "3f441bde-6fef-42f7-8a6d-9cd7eceb019e" (UID: "3f441bde-6fef-42f7-8a6d-9cd7eceb019e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.405135 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.405169 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.405179 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d9rn\" (UniqueName: \"kubernetes.io/projected/3f441bde-6fef-42f7-8a6d-9cd7eceb019e-kube-api-access-2d9rn\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.418003 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3724deaf-3c35-42d5-8b3c-f7306b134b15","Type":"ContainerStarted","Data":"88c4b8ffbed36346631bee6063965ac79b0ff24fbe19fcb79fad4812cd103ce1"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.418060 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3724deaf-3c35-42d5-8b3c-f7306b134b15","Type":"ContainerStarted","Data":"4c9976d2fb066c545ab9d0cdc8b248f1ed4e02a009f7c8968ff7b37cbea944a8"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.418075 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3724deaf-3c35-42d5-8b3c-f7306b134b15","Type":"ContainerStarted","Data":"4e9c9bb1a58c4692d96d8b19bed6b5677ae49a8e2fd638ebae9bfb388c87403b"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.423638 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" event={"ID":"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1","Type":"ContainerStarted","Data":"d3fe81906b02abc25be9a89d58bfd50672983269e36dcca09c58192a2cd8b861"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.425123 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8a752a8f-23da-4add-9dd1-3d765dd5440e","Type":"ContainerStarted","Data":"86b3a5ea1a0af833855648e09612305b90c9dd1472bc7f7d0bda8c5becd172bc"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.425146 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8a752a8f-23da-4add-9dd1-3d765dd5440e","Type":"ContainerStarted","Data":"6b656d50a6f80d89351800c22e62a6b583ccd7f9c3485547e3e8f8b64a050271"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.430748 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3f441bde-6fef-42f7-8a6d-9cd7eceb019e","Type":"ContainerDied","Data":"968f6727dfec0caa675fb3c89cad632d351db65c3ef86a7cb6ec11a8149a8d5d"} Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.430805 4852 scope.go:117] "RemoveContainer" 
containerID="ae84dfe980f40dd99676d6b80486e15e7ed985e658d60e85e95cdf1c6add2f6d" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.430823 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.466656 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.466634915 podStartE2EDuration="2.466634915s" podCreationTimestamp="2026-01-29 13:07:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:07:47.455789981 +0000 UTC m=+8764.673121115" watchObservedRunningTime="2026-01-29 13:07:47.466634915 +0000 UTC m=+8764.683966049" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.479441 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b915d83-fdef-4d3a-8079-26b0cd0a956c" path="/var/lib/kubelet/pods/1b915d83-fdef-4d3a-8079-26b0cd0a956c/volumes" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.480310 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7adc97e8-4d27-486c-820f-4b49d6fd095b" path="/var/lib/kubelet/pods/7adc97e8-4d27-486c-820f-4b49d6fd095b/volumes" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.492475 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" podStartSLOduration=2.9864245240000002 podStartE2EDuration="3.492458944s" podCreationTimestamp="2026-01-29 13:07:44 +0000 UTC" firstStartedPulling="2026-01-29 13:07:45.820658516 +0000 UTC m=+8763.037989650" lastFinishedPulling="2026-01-29 13:07:46.326692936 +0000 UTC m=+8763.544024070" observedRunningTime="2026-01-29 13:07:47.490987409 +0000 UTC m=+8764.708318583" watchObservedRunningTime="2026-01-29 13:07:47.492458944 +0000 UTC m=+8764.709790078" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.510936 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.526156 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.533932 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:07:47 crc kubenswrapper[4852]: E0129 13:07:47.536688 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerName="nova-scheduler-scheduler" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.536715 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerName="nova-scheduler-scheduler" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.536978 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" containerName="nova-scheduler-scheduler" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.537751 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.540338 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.549641 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.712247 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-config-data\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.712320 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bwnw\" (UniqueName: \"kubernetes.io/projected/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-kube-api-access-7bwnw\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.712345 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.816125 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-config-data\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.816245 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bwnw\" (UniqueName: \"kubernetes.io/projected/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-kube-api-access-7bwnw\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.816276 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.835063 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-config-data\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.837689 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.839395 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bwnw\" (UniqueName: 
\"kubernetes.io/projected/c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841-kube-api-access-7bwnw\") pod \"nova-scheduler-0\" (UID: \"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841\") " pod="openstack/nova-scheduler-0" Jan 29 13:07:47 crc kubenswrapper[4852]: I0129 13:07:47.862742 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.373215 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:07:48 crc kubenswrapper[4852]: W0129 13:07:48.389158 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4f2ec21_7302_4fa0_8ceb_0a9cdfa15841.slice/crio-b5681e52bbef0e896b12f43198f73b157078102de1169071d6845a1a9a9ae28d WatchSource:0}: Error finding container b5681e52bbef0e896b12f43198f73b157078102de1169071d6845a1a9a9ae28d: Status 404 returned error can't find the container with id b5681e52bbef0e896b12f43198f73b157078102de1169071d6845a1a9a9ae28d Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.453257 4852 generic.go:334] "Generic (PLEG): container finished" podID="66a97aee-41b4-454c-a584-9f0195bce766" containerID="020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa" exitCode=0 Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.453347 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"66a97aee-41b4-454c-a584-9f0195bce766","Type":"ContainerDied","Data":"020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa"} Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.454947 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8a752a8f-23da-4add-9dd1-3d765dd5440e","Type":"ContainerStarted","Data":"aba8638472017c97d4241abdf589aecefefb97e71d824fd4a118178d8ce2b271"} Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.459441 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841","Type":"ContainerStarted","Data":"b5681e52bbef0e896b12f43198f73b157078102de1169071d6845a1a9a9ae28d"} Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.485979 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.520416 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.520392923 podStartE2EDuration="3.520392923s" podCreationTimestamp="2026-01-29 13:07:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:07:48.478563274 +0000 UTC m=+8765.695894408" watchObservedRunningTime="2026-01-29 13:07:48.520392923 +0000 UTC m=+8765.737724057" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.636396 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-config-data\") pod \"66a97aee-41b4-454c-a584-9f0195bce766\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.636675 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm4tz\" (UniqueName: \"kubernetes.io/projected/66a97aee-41b4-454c-a584-9f0195bce766-kube-api-access-sm4tz\") pod \"66a97aee-41b4-454c-a584-9f0195bce766\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.636711 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-combined-ca-bundle\") pod \"66a97aee-41b4-454c-a584-9f0195bce766\" (UID: \"66a97aee-41b4-454c-a584-9f0195bce766\") " Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.642459 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a97aee-41b4-454c-a584-9f0195bce766-kube-api-access-sm4tz" (OuterVolumeSpecName: "kube-api-access-sm4tz") pod "66a97aee-41b4-454c-a584-9f0195bce766" (UID: "66a97aee-41b4-454c-a584-9f0195bce766"). InnerVolumeSpecName "kube-api-access-sm4tz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.666241 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-config-data" (OuterVolumeSpecName: "config-data") pod "66a97aee-41b4-454c-a584-9f0195bce766" (UID: "66a97aee-41b4-454c-a584-9f0195bce766"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.675241 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66a97aee-41b4-454c-a584-9f0195bce766" (UID: "66a97aee-41b4-454c-a584-9f0195bce766"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.739805 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm4tz\" (UniqueName: \"kubernetes.io/projected/66a97aee-41b4-454c-a584-9f0195bce766-kube-api-access-sm4tz\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.739842 4852 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:48 crc kubenswrapper[4852]: I0129 13:07:48.739853 4852 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a97aee-41b4-454c-a584-9f0195bce766-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.475718 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f441bde-6fef-42f7-8a6d-9cd7eceb019e" path="/var/lib/kubelet/pods/3f441bde-6fef-42f7-8a6d-9cd7eceb019e/volumes" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.484291 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.484300 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"66a97aee-41b4-454c-a584-9f0195bce766","Type":"ContainerDied","Data":"a8367f59189b421e33b330cb2134bc2c72e2e2c820769783be595eb5fd33393b"} Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.484434 4852 scope.go:117] "RemoveContainer" containerID="020adf4c8abada2f8a7d83b755eb0701069cfba71634cdddf88cb78865792ffa" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.489023 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841","Type":"ContainerStarted","Data":"c9adc49ab0d1bf5992668a1968eaf23ac66d0bb657768bfa3d80ff265590095c"} Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.523926 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.523907627 podStartE2EDuration="2.523907627s" podCreationTimestamp="2026-01-29 13:07:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:07:49.519001197 +0000 UTC m=+8766.736332341" watchObservedRunningTime="2026-01-29 13:07:49.523907627 +0000 UTC m=+8766.741238761" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.548722 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.565188 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.577692 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:07:49 crc kubenswrapper[4852]: E0129 13:07:49.578190 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a97aee-41b4-454c-a584-9f0195bce766" containerName="nova-cell0-conductor-conductor" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.578208 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a97aee-41b4-454c-a584-9f0195bce766" containerName="nova-cell0-conductor-conductor" Jan 29 
13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.578449 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a97aee-41b4-454c-a584-9f0195bce766" containerName="nova-cell0-conductor-conductor" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.579260 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.581055 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.606823 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.662062 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f31f0e59-612f-4180-ab59-98f0e79985f0-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.662164 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f31f0e59-612f-4180-ab59-98f0e79985f0-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.662230 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsv4q\" (UniqueName: \"kubernetes.io/projected/f31f0e59-612f-4180-ab59-98f0e79985f0-kube-api-access-vsv4q\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.764099 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsv4q\" (UniqueName: \"kubernetes.io/projected/f31f0e59-612f-4180-ab59-98f0e79985f0-kube-api-access-vsv4q\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.764320 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f31f0e59-612f-4180-ab59-98f0e79985f0-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.764395 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f31f0e59-612f-4180-ab59-98f0e79985f0-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.771325 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f31f0e59-612f-4180-ab59-98f0e79985f0-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.774347 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f31f0e59-612f-4180-ab59-98f0e79985f0-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.805478 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsv4q\" (UniqueName: \"kubernetes.io/projected/f31f0e59-612f-4180-ab59-98f0e79985f0-kube-api-access-vsv4q\") pod \"nova-cell0-conductor-0\" (UID: \"f31f0e59-612f-4180-ab59-98f0e79985f0\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:49 crc kubenswrapper[4852]: I0129 13:07:49.908552 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:50 crc kubenswrapper[4852]: I0129 13:07:50.409869 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:07:50 crc kubenswrapper[4852]: W0129 13:07:50.412891 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf31f0e59_612f_4180_ab59_98f0e79985f0.slice/crio-f6cd4f4f5146e85a22215a43e415613192fad24dbd0a4ae0e46677503b7f1832 WatchSource:0}: Error finding container f6cd4f4f5146e85a22215a43e415613192fad24dbd0a4ae0e46677503b7f1832: Status 404 returned error can't find the container with id f6cd4f4f5146e85a22215a43e415613192fad24dbd0a4ae0e46677503b7f1832 Jan 29 13:07:50 crc kubenswrapper[4852]: I0129 13:07:50.502451 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f31f0e59-612f-4180-ab59-98f0e79985f0","Type":"ContainerStarted","Data":"f6cd4f4f5146e85a22215a43e415613192fad24dbd0a4ae0e46677503b7f1832"} Jan 29 13:07:50 crc kubenswrapper[4852]: I0129 13:07:50.880853 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:07:50 crc kubenswrapper[4852]: I0129 13:07:50.881206 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.474600 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a97aee-41b4-454c-a584-9f0195bce766" path="/var/lib/kubelet/pods/66a97aee-41b4-454c-a584-9f0195bce766/volumes" Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.514650 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f31f0e59-612f-4180-ab59-98f0e79985f0","Type":"ContainerStarted","Data":"675005a86b90152abf2fbe3b80f58d80cc6efc2a321168d4f96dc5b52a37fab3"} Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.515820 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.540678 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.540660552 podStartE2EDuration="2.540660552s" podCreationTimestamp="2026-01-29 13:07:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:07:51.534387799 +0000 UTC m=+8768.751718933" watchObservedRunningTime="2026-01-29 13:07:51.540660552 +0000 UTC m=+8768.757991686" Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.726304 4852 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.747093 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 13:07:51 crc kubenswrapper[4852]: I0129 13:07:51.789055 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:52 crc kubenswrapper[4852]: I0129 13:07:52.273026 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8r86w"] Jan 29 13:07:52 crc kubenswrapper[4852]: I0129 13:07:52.865749 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 13:07:53 crc kubenswrapper[4852]: I0129 13:07:53.558166 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8r86w" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="registry-server" containerID="cri-o://29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413" gracePeriod=2 Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.216055 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.281943 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-catalog-content\") pod \"9c8d1249-9234-4e7e-8046-55bac172b611\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.282032 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-utilities\") pod \"9c8d1249-9234-4e7e-8046-55bac172b611\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.282236 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rq249\" (UniqueName: \"kubernetes.io/projected/9c8d1249-9234-4e7e-8046-55bac172b611-kube-api-access-rq249\") pod \"9c8d1249-9234-4e7e-8046-55bac172b611\" (UID: \"9c8d1249-9234-4e7e-8046-55bac172b611\") " Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.283244 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-utilities" (OuterVolumeSpecName: "utilities") pod "9c8d1249-9234-4e7e-8046-55bac172b611" (UID: "9c8d1249-9234-4e7e-8046-55bac172b611"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.295911 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c8d1249-9234-4e7e-8046-55bac172b611-kube-api-access-rq249" (OuterVolumeSpecName: "kube-api-access-rq249") pod "9c8d1249-9234-4e7e-8046-55bac172b611" (UID: "9c8d1249-9234-4e7e-8046-55bac172b611"). InnerVolumeSpecName "kube-api-access-rq249". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.312019 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c8d1249-9234-4e7e-8046-55bac172b611" (UID: "9c8d1249-9234-4e7e-8046-55bac172b611"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.384884 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rq249\" (UniqueName: \"kubernetes.io/projected/9c8d1249-9234-4e7e-8046-55bac172b611-kube-api-access-rq249\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.384927 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.384940 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c8d1249-9234-4e7e-8046-55bac172b611-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.570957 4852 generic.go:334] "Generic (PLEG): container finished" podID="9c8d1249-9234-4e7e-8046-55bac172b611" containerID="29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413" exitCode=0 Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.571015 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerDied","Data":"29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413"} Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.571046 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8r86w" event={"ID":"9c8d1249-9234-4e7e-8046-55bac172b611","Type":"ContainerDied","Data":"2c2c5b6a8bb7e1042a086b396d73c29400a8969a4e0a749b39909c22e8ffe240"} Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.571054 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8r86w" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.571071 4852 scope.go:117] "RemoveContainer" containerID="29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.598704 4852 scope.go:117] "RemoveContainer" containerID="1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.649606 4852 scope.go:117] "RemoveContainer" containerID="84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.649751 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8r86w"] Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.660762 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8r86w"] Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.673845 4852 scope.go:117] "RemoveContainer" containerID="29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413" Jan 29 13:07:54 crc kubenswrapper[4852]: E0129 13:07:54.676390 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413\": container with ID starting with 29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413 not found: ID does not exist" containerID="29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.676541 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413"} err="failed to get container status \"29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413\": rpc error: code = NotFound desc = could not find container \"29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413\": container with ID starting with 29995d033468672ce1aed2b74c8b317ad9e69f27a6b89dd66af47733decb0413 not found: ID does not exist" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.676676 4852 scope.go:117] "RemoveContainer" containerID="1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb" Jan 29 13:07:54 crc kubenswrapper[4852]: E0129 13:07:54.677087 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb\": container with ID starting with 1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb not found: ID does not exist" containerID="1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.677178 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb"} err="failed to get container status \"1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb\": rpc error: code = NotFound desc = could not find container \"1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb\": container with ID starting with 1fb1ed2c3f3f6acd822fa8306696810bdafa1234f87ed6485a9f721731f866eb not found: ID does not exist" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.677255 4852 scope.go:117] "RemoveContainer" 
containerID="84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8" Jan 29 13:07:54 crc kubenswrapper[4852]: E0129 13:07:54.677563 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8\": container with ID starting with 84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8 not found: ID does not exist" containerID="84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.677692 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8"} err="failed to get container status \"84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8\": rpc error: code = NotFound desc = could not find container \"84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8\": container with ID starting with 84d30bcb67810d88307738c52fc2b68fbeaf943cc39d182b465b7bf0d0c456a8 not found: ID does not exist" Jan 29 13:07:54 crc kubenswrapper[4852]: I0129 13:07:54.801911 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xc2rm" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="registry-server" probeResult="failure" output=< Jan 29 13:07:54 crc kubenswrapper[4852]: timeout: failed to connect service ":50051" within 1s Jan 29 13:07:54 crc kubenswrapper[4852]: > Jan 29 13:07:55 crc kubenswrapper[4852]: I0129 13:07:55.476745 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" path="/var/lib/kubelet/pods/9c8d1249-9234-4e7e-8046-55bac172b611/volumes" Jan 29 13:07:55 crc kubenswrapper[4852]: I0129 13:07:55.880953 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 13:07:55 crc kubenswrapper[4852]: I0129 13:07:55.880994 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 13:07:55 crc kubenswrapper[4852]: I0129 13:07:55.901144 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:07:55 crc kubenswrapper[4852]: I0129 13:07:55.901199 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:07:57 crc kubenswrapper[4852]: I0129 13:07:57.048868 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3724deaf-3c35-42d5-8b3c-f7306b134b15" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.191:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:07:57 crc kubenswrapper[4852]: I0129 13:07:57.048872 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8a752a8f-23da-4add-9dd1-3d765dd5440e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:07:57 crc kubenswrapper[4852]: I0129 13:07:57.048906 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3724deaf-3c35-42d5-8b3c-f7306b134b15" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.191:8775/\": context deadline exceeded (Client.Timeout 
exceeded while awaiting headers)" Jan 29 13:07:57 crc kubenswrapper[4852]: I0129 13:07:57.048924 4852 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8a752a8f-23da-4add-9dd1-3d765dd5440e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:07:57 crc kubenswrapper[4852]: I0129 13:07:57.865134 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 13:07:57 crc kubenswrapper[4852]: I0129 13:07:57.939209 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 13:07:58 crc kubenswrapper[4852]: I0129 13:07:58.681264 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 13:07:59 crc kubenswrapper[4852]: I0129 13:07:59.945205 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 13:08:03 crc kubenswrapper[4852]: I0129 13:08:03.779556 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:08:03 crc kubenswrapper[4852]: I0129 13:08:03.838982 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:08:04 crc kubenswrapper[4852]: I0129 13:08:04.020162 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xc2rm"] Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.747372 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xc2rm" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="registry-server" containerID="cri-o://d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb" gracePeriod=2 Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.793445 4852 scope.go:117] "RemoveContainer" containerID="9b4f777a246caab0b8394d8c852f616f25a1e6a20801f90e5a01b52249926ed4" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.833054 4852 scope.go:117] "RemoveContainer" containerID="d0e9c5f01424616b0bfadafd00ea22e49f5c98bb771f69a655780f61ba81c303" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.894233 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.896549 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.902856 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.903666 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.905965 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.906860 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 13:08:05 crc kubenswrapper[4852]: I0129 13:08:05.911838 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 13:08:06 crc kubenswrapper[4852]: 
I0129 13:08:06.306476 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.351677 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xklbq\" (UniqueName: \"kubernetes.io/projected/4684f150-6b82-4217-8d50-34d28b356821-kube-api-access-xklbq\") pod \"4684f150-6b82-4217-8d50-34d28b356821\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.351779 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-utilities\") pod \"4684f150-6b82-4217-8d50-34d28b356821\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.351925 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-catalog-content\") pod \"4684f150-6b82-4217-8d50-34d28b356821\" (UID: \"4684f150-6b82-4217-8d50-34d28b356821\") " Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.357824 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-utilities" (OuterVolumeSpecName: "utilities") pod "4684f150-6b82-4217-8d50-34d28b356821" (UID: "4684f150-6b82-4217-8d50-34d28b356821"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.365961 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4684f150-6b82-4217-8d50-34d28b356821-kube-api-access-xklbq" (OuterVolumeSpecName: "kube-api-access-xklbq") pod "4684f150-6b82-4217-8d50-34d28b356821" (UID: "4684f150-6b82-4217-8d50-34d28b356821"). InnerVolumeSpecName "kube-api-access-xklbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.454518 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xklbq\" (UniqueName: \"kubernetes.io/projected/4684f150-6b82-4217-8d50-34d28b356821-kube-api-access-xklbq\") on node \"crc\" DevicePath \"\"" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.454765 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.465481 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4684f150-6b82-4217-8d50-34d28b356821" (UID: "4684f150-6b82-4217-8d50-34d28b356821"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.557343 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4684f150-6b82-4217-8d50-34d28b356821-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.757278 4852 generic.go:334] "Generic (PLEG): container finished" podID="4684f150-6b82-4217-8d50-34d28b356821" containerID="d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb" exitCode=0 Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.757401 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerDied","Data":"d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb"} Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.757452 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xc2rm" event={"ID":"4684f150-6b82-4217-8d50-34d28b356821","Type":"ContainerDied","Data":"85c281650a86f105446bf45c9ef0feac6054a6e8740f09c6aef4ff97511f2a58"} Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.757488 4852 scope.go:117] "RemoveContainer" containerID="d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.758751 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xc2rm" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.759350 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.765544 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.766905 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.788409 4852 scope.go:117] "RemoveContainer" containerID="c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.831412 4852 scope.go:117] "RemoveContainer" containerID="27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.850837 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xc2rm"] Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.875632 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xc2rm"] Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.918823 4852 scope.go:117] "RemoveContainer" containerID="d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb" Jan 29 13:08:06 crc kubenswrapper[4852]: E0129 13:08:06.919458 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb\": container with ID starting with d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb not found: ID does not exist" containerID="d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.919522 4852 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb"} err="failed to get container status \"d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb\": rpc error: code = NotFound desc = could not find container \"d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb\": container with ID starting with d236ee5bc8ec429692ed34e993fb1971bf05e7801222d719ea8824a21b739dcb not found: ID does not exist" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.919549 4852 scope.go:117] "RemoveContainer" containerID="c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c" Jan 29 13:08:06 crc kubenswrapper[4852]: E0129 13:08:06.919826 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c\": container with ID starting with c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c not found: ID does not exist" containerID="c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.919859 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c"} err="failed to get container status \"c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c\": rpc error: code = NotFound desc = could not find container \"c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c\": container with ID starting with c1295a908590fd197e128602ae5c2a2f146f46fa1f5f67672908c8e3efc9490c not found: ID does not exist" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.919880 4852 scope.go:117] "RemoveContainer" containerID="27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883" Jan 29 13:08:06 crc kubenswrapper[4852]: E0129 13:08:06.920125 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883\": container with ID starting with 27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883 not found: ID does not exist" containerID="27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883" Jan 29 13:08:06 crc kubenswrapper[4852]: I0129 13:08:06.920156 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883"} err="failed to get container status \"27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883\": rpc error: code = NotFound desc = could not find container \"27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883\": container with ID starting with 27f553a0f84999e51686a3346aac6ee4fe96292e836bbb67c3b57849bda48883 not found: ID does not exist" Jan 29 13:08:07 crc kubenswrapper[4852]: I0129 13:08:07.477787 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4684f150-6b82-4217-8d50-34d28b356821" path="/var/lib/kubelet/pods/4684f150-6b82-4217-8d50-34d28b356821/volumes" Jan 29 13:09:30 crc kubenswrapper[4852]: I0129 13:09:30.017559 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 
13:09:30 crc kubenswrapper[4852]: I0129 13:09:30.018251 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:10:00 crc kubenswrapper[4852]: I0129 13:10:00.017127 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:10:00 crc kubenswrapper[4852]: I0129 13:10:00.017833 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.016806 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.019728 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.019977 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.021923 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1356af181d318e1b32512cd5ec8240ba40dc76d2162f9efebacefb4290325827"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.022261 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://1356af181d318e1b32512cd5ec8240ba40dc76d2162f9efebacefb4290325827" gracePeriod=600 Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.459424 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="1356af181d318e1b32512cd5ec8240ba40dc76d2162f9efebacefb4290325827" exitCode=0 Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 13:10:30.459497 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"1356af181d318e1b32512cd5ec8240ba40dc76d2162f9efebacefb4290325827"} Jan 29 13:10:30 crc kubenswrapper[4852]: I0129 
13:10:30.459731 4852 scope.go:117] "RemoveContainer" containerID="ee2e1b5fd2b3724945896d0e85c63b183e0df5f3a35e10376eb348e216acc6f6" Jan 29 13:10:31 crc kubenswrapper[4852]: I0129 13:10:31.481508 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8"} Jan 29 13:10:48 crc kubenswrapper[4852]: I0129 13:10:48.678787 4852 generic.go:334] "Generic (PLEG): container finished" podID="6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" containerID="d3fe81906b02abc25be9a89d58bfd50672983269e36dcca09c58192a2cd8b861" exitCode=0 Jan 29 13:10:48 crc kubenswrapper[4852]: I0129 13:10:48.678908 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" event={"ID":"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1","Type":"ContainerDied","Data":"d3fe81906b02abc25be9a89d58bfd50672983269e36dcca09c58192a2cd8b861"} Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.262003 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425100 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfs4z\" (UniqueName: \"kubernetes.io/projected/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-kube-api-access-wfs4z\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425160 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ceph\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425221 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ssh-key-openstack-cell1\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425338 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-0\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425366 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-inventory\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425409 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-combined-ca-bundle\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425483 4852 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-1\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425509 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-1\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425558 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-0\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425663 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-1\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.425762 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-0\") pod \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\" (UID: \"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1\") " Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.433222 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-kube-api-access-wfs4z" (OuterVolumeSpecName: "kube-api-access-wfs4z") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "kube-api-access-wfs4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.435837 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ceph" (OuterVolumeSpecName: "ceph") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.442771 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.457932 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). 
InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.461137 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.463868 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.479902 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.483511 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-inventory" (OuterVolumeSpecName: "inventory") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.492441 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.495722 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.496788 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" (UID: "6a2c5425-b9b5-4030-9e85-4a935c5d0cd1"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528008 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528045 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528068 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfs4z\" (UniqueName: \"kubernetes.io/projected/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-kube-api-access-wfs4z\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528092 4852 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ceph\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528106 4852 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528118 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528129 4852 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528141 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528154 4852 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528167 4852 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.528178 4852 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6a2c5425-b9b5-4030-9e85-4a935c5d0cd1-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.705041 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" event={"ID":"6a2c5425-b9b5-4030-9e85-4a935c5d0cd1","Type":"ContainerDied","Data":"027fc2459705274e572faca17c054d5dacc0373cdd69180c57d77d7b84e97cd0"} Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 
13:10:50.705089 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="027fc2459705274e572faca17c054d5dacc0373cdd69180c57d77d7b84e97cd0" Jan 29 13:10:50 crc kubenswrapper[4852]: I0129 13:10:50.705158 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.224968 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dl65l"] Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225800 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="extract-content" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225811 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="extract-content" Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225828 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="extract-utilities" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225834 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="extract-utilities" Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225859 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="registry-server" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225867 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="registry-server" Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225885 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="extract-content" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225890 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="extract-content" Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225903 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="registry-server" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225908 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="registry-server" Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225923 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225930 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Jan 29 13:11:19 crc kubenswrapper[4852]: E0129 13:11:19.225941 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="extract-utilities" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.225946 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="extract-utilities" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.226113 4852 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="6a2c5425-b9b5-4030-9e85-4a935c5d0cd1" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.226139 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c8d1249-9234-4e7e-8046-55bac172b611" containerName="registry-server" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.226152 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="4684f150-6b82-4217-8d50-34d28b356821" containerName="registry-server" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.233022 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.249276 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dl65l"] Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.330806 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqg8t\" (UniqueName: \"kubernetes.io/projected/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-kube-api-access-mqg8t\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.330951 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-catalog-content\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.330978 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-utilities\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.433441 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqg8t\" (UniqueName: \"kubernetes.io/projected/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-kube-api-access-mqg8t\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.433564 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-catalog-content\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.433612 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-utilities\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.434212 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-utilities\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.434263 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-catalog-content\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.458455 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqg8t\" (UniqueName: \"kubernetes.io/projected/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-kube-api-access-mqg8t\") pod \"certified-operators-dl65l\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:19 crc kubenswrapper[4852]: I0129 13:11:19.550396 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:20 crc kubenswrapper[4852]: I0129 13:11:20.105954 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dl65l"] Jan 29 13:11:21 crc kubenswrapper[4852]: I0129 13:11:21.065357 4852 generic.go:334] "Generic (PLEG): container finished" podID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerID="1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686" exitCode=0 Jan 29 13:11:21 crc kubenswrapper[4852]: I0129 13:11:21.065543 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerDied","Data":"1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686"} Jan 29 13:11:21 crc kubenswrapper[4852]: I0129 13:11:21.065677 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerStarted","Data":"157a32b538e2685028f7c6af3ec644c4aeb3fc00d7f4d6fa60710a57ce4b4c99"} Jan 29 13:11:21 crc kubenswrapper[4852]: I0129 13:11:21.067763 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:11:23 crc kubenswrapper[4852]: I0129 13:11:23.101287 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerStarted","Data":"c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575"} Jan 29 13:11:25 crc kubenswrapper[4852]: I0129 13:11:25.122994 4852 generic.go:334] "Generic (PLEG): container finished" podID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerID="c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575" exitCode=0 Jan 29 13:11:25 crc kubenswrapper[4852]: I0129 13:11:25.123059 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerDied","Data":"c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575"} Jan 29 13:11:26 crc kubenswrapper[4852]: I0129 13:11:26.137775 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" 
event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerStarted","Data":"0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6"} Jan 29 13:11:26 crc kubenswrapper[4852]: I0129 13:11:26.170468 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dl65l" podStartSLOduration=2.541446743 podStartE2EDuration="7.170443634s" podCreationTimestamp="2026-01-29 13:11:19 +0000 UTC" firstStartedPulling="2026-01-29 13:11:21.067500304 +0000 UTC m=+8978.284831438" lastFinishedPulling="2026-01-29 13:11:25.696497185 +0000 UTC m=+8982.913828329" observedRunningTime="2026-01-29 13:11:26.160965403 +0000 UTC m=+8983.378296557" watchObservedRunningTime="2026-01-29 13:11:26.170443634 +0000 UTC m=+8983.387774778" Jan 29 13:11:29 crc kubenswrapper[4852]: I0129 13:11:29.550462 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:29 crc kubenswrapper[4852]: I0129 13:11:29.551078 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:29 crc kubenswrapper[4852]: I0129 13:11:29.610007 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:30 crc kubenswrapper[4852]: I0129 13:11:30.247053 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:30 crc kubenswrapper[4852]: I0129 13:11:30.337644 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dl65l"] Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.205695 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dl65l" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="registry-server" containerID="cri-o://0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6" gracePeriod=2 Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.741981 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.890913 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-utilities\") pod \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.890963 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqg8t\" (UniqueName: \"kubernetes.io/projected/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-kube-api-access-mqg8t\") pod \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.891244 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-catalog-content\") pod \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\" (UID: \"92e8e9b5-f8ae-4866-967c-ffb376c2ec44\") " Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.892446 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-utilities" (OuterVolumeSpecName: "utilities") pod "92e8e9b5-f8ae-4866-967c-ffb376c2ec44" (UID: "92e8e9b5-f8ae-4866-967c-ffb376c2ec44"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.903918 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-kube-api-access-mqg8t" (OuterVolumeSpecName: "kube-api-access-mqg8t") pod "92e8e9b5-f8ae-4866-967c-ffb376c2ec44" (UID: "92e8e9b5-f8ae-4866-967c-ffb376c2ec44"). InnerVolumeSpecName "kube-api-access-mqg8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.955179 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92e8e9b5-f8ae-4866-967c-ffb376c2ec44" (UID: "92e8e9b5-f8ae-4866-967c-ffb376c2ec44"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.994214 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.994251 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqg8t\" (UniqueName: \"kubernetes.io/projected/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-kube-api-access-mqg8t\") on node \"crc\" DevicePath \"\"" Jan 29 13:11:32 crc kubenswrapper[4852]: I0129 13:11:32.994262 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e8e9b5-f8ae-4866-967c-ffb376c2ec44-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.217017 4852 generic.go:334] "Generic (PLEG): container finished" podID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerID="0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6" exitCode=0 Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.217056 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerDied","Data":"0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6"} Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.217086 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dl65l" event={"ID":"92e8e9b5-f8ae-4866-967c-ffb376c2ec44","Type":"ContainerDied","Data":"157a32b538e2685028f7c6af3ec644c4aeb3fc00d7f4d6fa60710a57ce4b4c99"} Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.217089 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dl65l" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.217121 4852 scope.go:117] "RemoveContainer" containerID="0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.241235 4852 scope.go:117] "RemoveContainer" containerID="c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.262262 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dl65l"] Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.287132 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dl65l"] Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.291177 4852 scope.go:117] "RemoveContainer" containerID="1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.336939 4852 scope.go:117] "RemoveContainer" containerID="0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6" Jan 29 13:11:33 crc kubenswrapper[4852]: E0129 13:11:33.337430 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6\": container with ID starting with 0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6 not found: ID does not exist" containerID="0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.337461 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6"} err="failed to get container status \"0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6\": rpc error: code = NotFound desc = could not find container \"0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6\": container with ID starting with 0c6b05eea4fa1e296d3dfc779d024c485a899efb141565cd3a5f141b844812c6 not found: ID does not exist" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.337487 4852 scope.go:117] "RemoveContainer" containerID="c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575" Jan 29 13:11:33 crc kubenswrapper[4852]: E0129 13:11:33.337843 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575\": container with ID starting with c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575 not found: ID does not exist" containerID="c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.337867 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575"} err="failed to get container status \"c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575\": rpc error: code = NotFound desc = could not find container \"c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575\": container with ID starting with c64996f17fb0bb87c0718de7b417370e4e413672b03f4698c5d87977a77fd575 not found: ID does not exist" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.337885 4852 scope.go:117] "RemoveContainer" 
containerID="1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686" Jan 29 13:11:33 crc kubenswrapper[4852]: E0129 13:11:33.338198 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686\": container with ID starting with 1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686 not found: ID does not exist" containerID="1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.338231 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686"} err="failed to get container status \"1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686\": rpc error: code = NotFound desc = could not find container \"1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686\": container with ID starting with 1c3fbb56eb5ccebc71cbc8b7f4ee5957df1914dd8ae62baab2829f4df39a3686 not found: ID does not exist" Jan 29 13:11:33 crc kubenswrapper[4852]: I0129 13:11:33.475452 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" path="/var/lib/kubelet/pods/92e8e9b5-f8ae-4866-967c-ffb376c2ec44/volumes" Jan 29 13:12:30 crc kubenswrapper[4852]: I0129 13:12:30.016760 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:12:30 crc kubenswrapper[4852]: I0129 13:12:30.017373 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:13:00 crc kubenswrapper[4852]: I0129 13:13:00.017362 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:13:00 crc kubenswrapper[4852]: I0129 13:13:00.018010 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:13:16 crc kubenswrapper[4852]: I0129 13:13:16.375502 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 13:13:16 crc kubenswrapper[4852]: I0129 13:13:16.376271 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-copy-data" podUID="6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" containerName="adoption" containerID="cri-o://b273d55a9a2199aca554638e48d079d9579814affcd6e894bd8fa9ee054f94e0" gracePeriod=30 Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.017765 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.018521 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.018626 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.019888 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.019986 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" gracePeriod=600 Jan 29 13:13:30 crc kubenswrapper[4852]: E0129 13:13:30.181739 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.587545 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" exitCode=0 Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.587630 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8"} Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.587724 4852 scope.go:117] "RemoveContainer" containerID="1356af181d318e1b32512cd5ec8240ba40dc76d2162f9efebacefb4290325827" Jan 29 13:13:30 crc kubenswrapper[4852]: I0129 13:13:30.589113 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:13:30 crc kubenswrapper[4852]: E0129 13:13:30.589458 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" 
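The machine-config-daemon entries above show the kubelet's liveness-probe restart loop: prober.go issues GET http://127.0.0.1:8798/health, the connection is refused, and after repeated failures kuberuntime_container.go kills the container (gracePeriod=600) while pod_workers.go reports CrashLoopBackOff with the restart back-off capped at 5m0s. Below is a minimal Go sketch of a probe spec that would drive this behaviour; the port and path are taken from the probe output in the log, but the period and failure threshold are assumptions inferred from the roughly 30 s spacing of the failures and are not read from the machine-config-operator sources.

```go
// Minimal sketch of an HTTP liveness probe matching the failures logged above.
// Port 8798 and path /health come from the log; the period and threshold values
// are illustrative assumptions, not the operator's actual configuration.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	liveness := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{ // named "Handler" in k8s.io/api before v0.22
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/health",
				Port: intstr.FromInt(8798),
				Host: "127.0.0.1",
			},
		},
		PeriodSeconds:    30, // assumption: matches the ~30 s spacing of the probe failures
		TimeoutSeconds:   1,
		FailureThreshold: 3, // assumption: several consecutive failures precede each restart
	}
	fmt.Printf("liveness probe: %+v\n", liveness)
}
```

With this loop, the restart at 13:10:30 still goes through (ContainerStarted 4ee771e8...), whereas by 13:13:30 the kubelet defers the restart ("back-off 5m0s restarting failed container"), which is the CrashLoopBackOff state repeated in the 13:13:45 entry that follows.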
Jan 29 13:13:45 crc kubenswrapper[4852]: I0129 13:13:45.463556 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:13:45 crc kubenswrapper[4852]: E0129 13:13:45.464517 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:13:46 crc kubenswrapper[4852]: I0129 13:13:46.769825 4852 generic.go:334] "Generic (PLEG): container finished" podID="6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" containerID="b273d55a9a2199aca554638e48d079d9579814affcd6e894bd8fa9ee054f94e0" exitCode=137 Jan 29 13:13:46 crc kubenswrapper[4852]: I0129 13:13:46.769936 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2","Type":"ContainerDied","Data":"b273d55a9a2199aca554638e48d079d9579814affcd6e894bd8fa9ee054f94e0"} Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.041824 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.094939 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mariadb-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") pod \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.095257 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf5gh\" (UniqueName: \"kubernetes.io/projected/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2-kube-api-access-gf5gh\") pod \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\" (UID: \"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2\") " Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.106424 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2-kube-api-access-gf5gh" (OuterVolumeSpecName: "kube-api-access-gf5gh") pod "6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" (UID: "6d441f0f-dfdf-478b-b6fc-aac1663ad0d2"). InnerVolumeSpecName "kube-api-access-gf5gh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.126929 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e" (OuterVolumeSpecName: "mariadb-data") pod "6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" (UID: "6d441f0f-dfdf-478b-b6fc-aac1663ad0d2"). InnerVolumeSpecName "pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.198077 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf5gh\" (UniqueName: \"kubernetes.io/projected/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2-kube-api-access-gf5gh\") on node \"crc\" DevicePath \"\"" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.198207 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") on node \"crc\" " Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.263754 4852 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.263909 4852 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e") on node "crc" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.300315 4852 reconciler_common.go:293] "Volume detached for volume \"pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-df1cbf56-e7ad-4dff-8275-5d389c3f168e\") on node \"crc\" DevicePath \"\"" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.788257 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"6d441f0f-dfdf-478b-b6fc-aac1663ad0d2","Type":"ContainerDied","Data":"7852247428f467bbbfaaa9ff550dc910160e6c11d9315fda89886b4a0b69dbc7"} Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.788312 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.788328 4852 scope.go:117] "RemoveContainer" containerID="b273d55a9a2199aca554638e48d079d9579814affcd6e894bd8fa9ee054f94e0" Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.825430 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 13:13:47 crc kubenswrapper[4852]: I0129 13:13:47.835454 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 13:13:48 crc kubenswrapper[4852]: I0129 13:13:48.433703 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Jan 29 13:13:48 crc kubenswrapper[4852]: I0129 13:13:48.434168 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-copy-data" podUID="d8fdece6-3b51-4de5-88f4-f32be06904da" containerName="adoption" containerID="cri-o://2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3" gracePeriod=30 Jan 29 13:13:49 crc kubenswrapper[4852]: I0129 13:13:49.482362 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" path="/var/lib/kubelet/pods/6d441f0f-dfdf-478b-b6fc-aac1663ad0d2/volumes" Jan 29 13:13:57 crc kubenswrapper[4852]: I0129 13:13:57.463272 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:13:57 crc kubenswrapper[4852]: E0129 13:13:57.464141 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:14:11 crc kubenswrapper[4852]: I0129 13:14:11.464272 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:14:11 crc kubenswrapper[4852]: E0129 13:14:11.465557 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:14:18 crc kubenswrapper[4852]: I0129 13:14:18.978158 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.138859 4852 generic.go:334] "Generic (PLEG): container finished" podID="d8fdece6-3b51-4de5-88f4-f32be06904da" containerID="2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3" exitCode=137 Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.138918 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d8fdece6-3b51-4de5-88f4-f32be06904da","Type":"ContainerDied","Data":"2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3"} Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.138962 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d8fdece6-3b51-4de5-88f4-f32be06904da","Type":"ContainerDied","Data":"896dd0e82fa371cf9a548496fc86b05934940b95fe9e2114c4f40e789cca0159"} Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.138968 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.138984 4852 scope.go:117] "RemoveContainer" containerID="2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.149090 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptxhn\" (UniqueName: \"kubernetes.io/projected/d8fdece6-3b51-4de5-88f4-f32be06904da-kube-api-access-ptxhn\") pod \"d8fdece6-3b51-4de5-88f4-f32be06904da\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.149178 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d8fdece6-3b51-4de5-88f4-f32be06904da-ovn-data-cert\") pod \"d8fdece6-3b51-4de5-88f4-f32be06904da\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.150417 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") pod \"d8fdece6-3b51-4de5-88f4-f32be06904da\" (UID: \"d8fdece6-3b51-4de5-88f4-f32be06904da\") " Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.157616 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8fdece6-3b51-4de5-88f4-f32be06904da-ovn-data-cert" (OuterVolumeSpecName: "ovn-data-cert") pod "d8fdece6-3b51-4de5-88f4-f32be06904da" (UID: "d8fdece6-3b51-4de5-88f4-f32be06904da"). InnerVolumeSpecName "ovn-data-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.158169 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8fdece6-3b51-4de5-88f4-f32be06904da-kube-api-access-ptxhn" (OuterVolumeSpecName: "kube-api-access-ptxhn") pod "d8fdece6-3b51-4de5-88f4-f32be06904da" (UID: "d8fdece6-3b51-4de5-88f4-f32be06904da"). InnerVolumeSpecName "kube-api-access-ptxhn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.167308 4852 scope.go:117] "RemoveContainer" containerID="2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3" Jan 29 13:14:19 crc kubenswrapper[4852]: E0129 13:14:19.167746 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3\": container with ID starting with 2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3 not found: ID does not exist" containerID="2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.167885 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3"} err="failed to get container status \"2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3\": rpc error: code = NotFound desc = could not find container \"2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3\": container with ID starting with 2d97b69da675cc9575c503ccdf2395266e3021e931581fa595d13ed64b3713f3 not found: ID does not exist" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.170863 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c" (OuterVolumeSpecName: "ovn-data") pod "d8fdece6-3b51-4de5-88f4-f32be06904da" (UID: "d8fdece6-3b51-4de5-88f4-f32be06904da"). InnerVolumeSpecName "pvc-d7127355-e659-41e0-adcc-6acf78ace48c". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.254362 4852 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d7127355-e659-41e0-adcc-6acf78ace48c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") on node \"crc\" " Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.254500 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptxhn\" (UniqueName: \"kubernetes.io/projected/d8fdece6-3b51-4de5-88f4-f32be06904da-kube-api-access-ptxhn\") on node \"crc\" DevicePath \"\"" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.254560 4852 reconciler_common.go:293] "Volume detached for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d8fdece6-3b51-4de5-88f4-f32be06904da-ovn-data-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.282377 4852 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.282601 4852 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d7127355-e659-41e0-adcc-6acf78ace48c" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c") on node "crc" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.356838 4852 reconciler_common.go:293] "Volume detached for volume \"pvc-d7127355-e659-41e0-adcc-6acf78ace48c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d7127355-e659-41e0-adcc-6acf78ace48c\") on node \"crc\" DevicePath \"\"" Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.478305 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Jan 29 13:14:19 crc kubenswrapper[4852]: I0129 13:14:19.488965 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-copy-data"] Jan 29 13:14:21 crc kubenswrapper[4852]: I0129 13:14:21.480563 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8fdece6-3b51-4de5-88f4-f32be06904da" path="/var/lib/kubelet/pods/d8fdece6-3b51-4de5-88f4-f32be06904da/volumes" Jan 29 13:14:24 crc kubenswrapper[4852]: I0129 13:14:24.465310 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:14:24 crc kubenswrapper[4852]: E0129 13:14:24.466694 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:14:35 crc kubenswrapper[4852]: I0129 13:14:35.464035 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:14:35 crc kubenswrapper[4852]: E0129 13:14:35.464788 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:14:46 crc kubenswrapper[4852]: I0129 13:14:46.464074 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:14:46 crc kubenswrapper[4852]: E0129 13:14:46.464828 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:14:57 crc kubenswrapper[4852]: I0129 13:14:57.463880 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:14:57 crc kubenswrapper[4852]: E0129 13:14:57.464650 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.156253 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd"] Jan 29 13:15:00 crc kubenswrapper[4852]: E0129 13:15:00.157150 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8fdece6-3b51-4de5-88f4-f32be06904da" containerName="adoption" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157169 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8fdece6-3b51-4de5-88f4-f32be06904da" containerName="adoption" Jan 29 13:15:00 crc kubenswrapper[4852]: E0129 13:15:00.157185 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="registry-server" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157194 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="registry-server" Jan 29 13:15:00 crc kubenswrapper[4852]: E0129 13:15:00.157218 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" containerName="adoption" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157227 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" containerName="adoption" Jan 29 13:15:00 crc kubenswrapper[4852]: E0129 13:15:00.157246 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="extract-utilities" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157254 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="extract-utilities" Jan 29 13:15:00 crc kubenswrapper[4852]: E0129 13:15:00.157293 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="extract-content" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157302 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="extract-content" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157546 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="92e8e9b5-f8ae-4866-967c-ffb376c2ec44" containerName="registry-server" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157568 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d441f0f-dfdf-478b-b6fc-aac1663ad0d2" containerName="adoption" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.157607 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8fdece6-3b51-4de5-88f4-f32be06904da" containerName="adoption" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.158487 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.160446 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.165495 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.167495 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd"] Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.281284 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vvw2\" (UniqueName: \"kubernetes.io/projected/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-kube-api-access-6vvw2\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.281369 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-secret-volume\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.281445 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-config-volume\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.383605 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vvw2\" (UniqueName: \"kubernetes.io/projected/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-kube-api-access-6vvw2\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.383658 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-secret-volume\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.383729 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-config-volume\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.384732 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-config-volume\") pod 
\"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.390471 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-secret-volume\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.399758 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vvw2\" (UniqueName: \"kubernetes.io/projected/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-kube-api-access-6vvw2\") pod \"collect-profiles-29494875-6vchd\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.541498 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:00 crc kubenswrapper[4852]: I0129 13:15:00.999388 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd"] Jan 29 13:15:01 crc kubenswrapper[4852]: W0129 13:15:01.002202 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb89d6ec5_a3ab_4d87_a52d_65e293e0755c.slice/crio-5fe3ba464368a4b947c3ab3051bae1685828c95f47e4d8de42437b2a4567db49 WatchSource:0}: Error finding container 5fe3ba464368a4b947c3ab3051bae1685828c95f47e4d8de42437b2a4567db49: Status 404 returned error can't find the container with id 5fe3ba464368a4b947c3ab3051bae1685828c95f47e4d8de42437b2a4567db49 Jan 29 13:15:01 crc kubenswrapper[4852]: I0129 13:15:01.600521 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" event={"ID":"b89d6ec5-a3ab-4d87-a52d-65e293e0755c","Type":"ContainerStarted","Data":"d4ff1b6aee23b433a24a3abd96da13f6a186b52412cb58bd7c3e6f51d68ea192"} Jan 29 13:15:01 crc kubenswrapper[4852]: I0129 13:15:01.600844 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" event={"ID":"b89d6ec5-a3ab-4d87-a52d-65e293e0755c","Type":"ContainerStarted","Data":"5fe3ba464368a4b947c3ab3051bae1685828c95f47e4d8de42437b2a4567db49"} Jan 29 13:15:01 crc kubenswrapper[4852]: I0129 13:15:01.636485 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" podStartSLOduration=1.6364552909999999 podStartE2EDuration="1.636455291s" podCreationTimestamp="2026-01-29 13:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:15:01.614864765 +0000 UTC m=+9198.832195919" watchObservedRunningTime="2026-01-29 13:15:01.636455291 +0000 UTC m=+9198.853786465" Jan 29 13:15:02 crc kubenswrapper[4852]: I0129 13:15:02.612284 4852 generic.go:334] "Generic (PLEG): container finished" podID="b89d6ec5-a3ab-4d87-a52d-65e293e0755c" containerID="d4ff1b6aee23b433a24a3abd96da13f6a186b52412cb58bd7c3e6f51d68ea192" exitCode=0 Jan 29 13:15:02 crc kubenswrapper[4852]: I0129 
13:15:02.612385 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" event={"ID":"b89d6ec5-a3ab-4d87-a52d-65e293e0755c","Type":"ContainerDied","Data":"d4ff1b6aee23b433a24a3abd96da13f6a186b52412cb58bd7c3e6f51d68ea192"} Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.035232 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.170656 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vvw2\" (UniqueName: \"kubernetes.io/projected/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-kube-api-access-6vvw2\") pod \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.170718 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-config-volume\") pod \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.170763 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-secret-volume\") pod \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\" (UID: \"b89d6ec5-a3ab-4d87-a52d-65e293e0755c\") " Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.171942 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-config-volume" (OuterVolumeSpecName: "config-volume") pod "b89d6ec5-a3ab-4d87-a52d-65e293e0755c" (UID: "b89d6ec5-a3ab-4d87-a52d-65e293e0755c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.176625 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b89d6ec5-a3ab-4d87-a52d-65e293e0755c" (UID: "b89d6ec5-a3ab-4d87-a52d-65e293e0755c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.181867 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-kube-api-access-6vvw2" (OuterVolumeSpecName: "kube-api-access-6vvw2") pod "b89d6ec5-a3ab-4d87-a52d-65e293e0755c" (UID: "b89d6ec5-a3ab-4d87-a52d-65e293e0755c"). InnerVolumeSpecName "kube-api-access-6vvw2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.273402 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vvw2\" (UniqueName: \"kubernetes.io/projected/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-kube-api-access-6vvw2\") on node \"crc\" DevicePath \"\"" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.273441 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.273450 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b89d6ec5-a3ab-4d87-a52d-65e293e0755c-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.632539 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" event={"ID":"b89d6ec5-a3ab-4d87-a52d-65e293e0755c","Type":"ContainerDied","Data":"5fe3ba464368a4b947c3ab3051bae1685828c95f47e4d8de42437b2a4567db49"} Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.632583 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fe3ba464368a4b947c3ab3051bae1685828c95f47e4d8de42437b2a4567db49" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.632609 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-6vchd" Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.765314 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn"] Jan 29 13:15:04 crc kubenswrapper[4852]: I0129 13:15:04.775411 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-mbppn"] Jan 29 13:15:05 crc kubenswrapper[4852]: I0129 13:15:05.475077 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="697e0b44-2a41-46c2-88a1-5a95d72a2dd5" path="/var/lib/kubelet/pods/697e0b44-2a41-46c2-88a1-5a95d72a2dd5/volumes" Jan 29 13:15:06 crc kubenswrapper[4852]: I0129 13:15:06.516140 4852 scope.go:117] "RemoveContainer" containerID="8347f6c52e121d04edfdcb013d68b8741eeabff4fa4a1d2773b17e42188a0b83" Jan 29 13:15:10 crc kubenswrapper[4852]: I0129 13:15:10.464109 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:15:10 crc kubenswrapper[4852]: E0129 13:15:10.464976 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.206775 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w4bvc/must-gather-zdtbg"] Jan 29 13:15:16 crc kubenswrapper[4852]: E0129 13:15:16.207810 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b89d6ec5-a3ab-4d87-a52d-65e293e0755c" containerName="collect-profiles" Jan 29 13:15:16 crc kubenswrapper[4852]: 
I0129 13:15:16.207829 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b89d6ec5-a3ab-4d87-a52d-65e293e0755c" containerName="collect-profiles" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.208064 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b89d6ec5-a3ab-4d87-a52d-65e293e0755c" containerName="collect-profiles" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.209403 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.211390 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w4bvc"/"openshift-service-ca.crt" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.211573 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-w4bvc"/"default-dockercfg-9dfbw" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.212088 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w4bvc"/"kube-root-ca.crt" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.223640 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w4bvc/must-gather-zdtbg"] Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.249835 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85bxz\" (UniqueName: \"kubernetes.io/projected/de6244cd-ee28-45e9-92f8-03b9dbbd3417-kube-api-access-85bxz\") pod \"must-gather-zdtbg\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.249925 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de6244cd-ee28-45e9-92f8-03b9dbbd3417-must-gather-output\") pod \"must-gather-zdtbg\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.351129 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85bxz\" (UniqueName: \"kubernetes.io/projected/de6244cd-ee28-45e9-92f8-03b9dbbd3417-kube-api-access-85bxz\") pod \"must-gather-zdtbg\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.351418 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de6244cd-ee28-45e9-92f8-03b9dbbd3417-must-gather-output\") pod \"must-gather-zdtbg\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.352034 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de6244cd-ee28-45e9-92f8-03b9dbbd3417-must-gather-output\") pod \"must-gather-zdtbg\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.371767 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85bxz\" (UniqueName: \"kubernetes.io/projected/de6244cd-ee28-45e9-92f8-03b9dbbd3417-kube-api-access-85bxz\") pod 
\"must-gather-zdtbg\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:16 crc kubenswrapper[4852]: I0129 13:15:16.533516 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:15:17 crc kubenswrapper[4852]: I0129 13:15:17.015518 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w4bvc/must-gather-zdtbg"] Jan 29 13:15:17 crc kubenswrapper[4852]: I0129 13:15:17.749825 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" event={"ID":"de6244cd-ee28-45e9-92f8-03b9dbbd3417","Type":"ContainerStarted","Data":"410e80e056bcc62e7c0bca13787f6746cdccec53810ee7ffbb332f80b11abbb5"} Jan 29 13:15:24 crc kubenswrapper[4852]: I0129 13:15:24.464429 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:15:24 crc kubenswrapper[4852]: E0129 13:15:24.465447 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:15:27 crc kubenswrapper[4852]: I0129 13:15:27.872278 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" event={"ID":"de6244cd-ee28-45e9-92f8-03b9dbbd3417","Type":"ContainerStarted","Data":"df8eef2d2c445b16716fd7d1fe702d7ee220b0ea68bf63c512ba59564a585acc"} Jan 29 13:15:28 crc kubenswrapper[4852]: I0129 13:15:28.884586 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" event={"ID":"de6244cd-ee28-45e9-92f8-03b9dbbd3417","Type":"ContainerStarted","Data":"da0eaa4369b00b83a661c988d1b15e49cdc8978942231d54c22ea7be2db8540f"} Jan 29 13:15:28 crc kubenswrapper[4852]: I0129 13:15:28.906466 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" podStartSLOduration=2.539712286 podStartE2EDuration="12.906447005s" podCreationTimestamp="2026-01-29 13:15:16 +0000 UTC" firstStartedPulling="2026-01-29 13:15:17.032105438 +0000 UTC m=+9214.249436572" lastFinishedPulling="2026-01-29 13:15:27.398840157 +0000 UTC m=+9224.616171291" observedRunningTime="2026-01-29 13:15:28.904100208 +0000 UTC m=+9226.121431342" watchObservedRunningTime="2026-01-29 13:15:28.906447005 +0000 UTC m=+9226.123778149" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.004513 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w4bvc/crc-debug-w62wx"] Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.006721 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.116692 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sw9n\" (UniqueName: \"kubernetes.io/projected/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-kube-api-access-5sw9n\") pod \"crc-debug-w62wx\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.116904 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-host\") pod \"crc-debug-w62wx\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.219753 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sw9n\" (UniqueName: \"kubernetes.io/projected/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-kube-api-access-5sw9n\") pod \"crc-debug-w62wx\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.219892 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-host\") pod \"crc-debug-w62wx\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.220030 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-host\") pod \"crc-debug-w62wx\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.255783 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sw9n\" (UniqueName: \"kubernetes.io/projected/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-kube-api-access-5sw9n\") pod \"crc-debug-w62wx\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:33 crc kubenswrapper[4852]: I0129 13:15:33.328010 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:15:34 crc kubenswrapper[4852]: I0129 13:15:34.034620 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" event={"ID":"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c","Type":"ContainerStarted","Data":"147608f714cc4815e02417d128823404be1c1a872cdc50d7e88525042c2dfc9b"} Jan 29 13:15:37 crc kubenswrapper[4852]: I0129 13:15:37.463652 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:15:37 crc kubenswrapper[4852]: E0129 13:15:37.464336 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:15:48 crc kubenswrapper[4852]: I0129 13:15:48.197677 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" event={"ID":"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c","Type":"ContainerStarted","Data":"aa36fd9413902fe5e0dce7036c0bc154941f796d0e85df24ed7f46e02b38b8b9"} Jan 29 13:15:49 crc kubenswrapper[4852]: I0129 13:15:49.463485 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:15:49 crc kubenswrapper[4852]: E0129 13:15:49.464394 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:16:04 crc kubenswrapper[4852]: I0129 13:16:04.463570 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:16:04 crc kubenswrapper[4852]: E0129 13:16:04.464337 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:16:18 crc kubenswrapper[4852]: I0129 13:16:18.463955 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:16:18 crc kubenswrapper[4852]: E0129 13:16:18.464795 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:16:18 crc kubenswrapper[4852]: I0129 13:16:18.533140 4852 generic.go:334] "Generic (PLEG): container finished" 
podID="e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" containerID="aa36fd9413902fe5e0dce7036c0bc154941f796d0e85df24ed7f46e02b38b8b9" exitCode=0 Jan 29 13:16:18 crc kubenswrapper[4852]: I0129 13:16:18.533184 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" event={"ID":"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c","Type":"ContainerDied","Data":"aa36fd9413902fe5e0dce7036c0bc154941f796d0e85df24ed7f46e02b38b8b9"} Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.324352 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.370907 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w4bvc/crc-debug-w62wx"] Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.383138 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w4bvc/crc-debug-w62wx"] Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.396353 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sw9n\" (UniqueName: \"kubernetes.io/projected/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-kube-api-access-5sw9n\") pod \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.396632 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-host\") pod \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\" (UID: \"e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c\") " Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.396674 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-host" (OuterVolumeSpecName: "host") pod "e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" (UID: "e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.397089 4852 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-host\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.417307 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-kube-api-access-5sw9n" (OuterVolumeSpecName: "kube-api-access-5sw9n") pod "e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" (UID: "e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c"). InnerVolumeSpecName "kube-api-access-5sw9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.499244 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sw9n\" (UniqueName: \"kubernetes.io/projected/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c-kube-api-access-5sw9n\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.552234 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="147608f714cc4815e02417d128823404be1c1a872cdc50d7e88525042c2dfc9b" Jan 29 13:16:20 crc kubenswrapper[4852]: I0129 13:16:20.552279 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-w62wx" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.477288 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" path="/var/lib/kubelet/pods/e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c/volumes" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.616210 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w4bvc/crc-debug-sfrqt"] Jan 29 13:16:21 crc kubenswrapper[4852]: E0129 13:16:21.616740 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" containerName="container-00" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.616762 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" containerName="container-00" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.617053 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8f6785a-a3d3-40bb-84c2-cd386e2bbe1c" containerName="container-00" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.624345 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.722928 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0335cb72-f006-46d4-a731-46677bc4b11c-host\") pod \"crc-debug-sfrqt\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.723005 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqvb5\" (UniqueName: \"kubernetes.io/projected/0335cb72-f006-46d4-a731-46677bc4b11c-kube-api-access-cqvb5\") pod \"crc-debug-sfrqt\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.825230 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0335cb72-f006-46d4-a731-46677bc4b11c-host\") pod \"crc-debug-sfrqt\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.825284 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqvb5\" (UniqueName: \"kubernetes.io/projected/0335cb72-f006-46d4-a731-46677bc4b11c-kube-api-access-cqvb5\") pod \"crc-debug-sfrqt\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.825765 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0335cb72-f006-46d4-a731-46677bc4b11c-host\") pod \"crc-debug-sfrqt\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.845014 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqvb5\" (UniqueName: \"kubernetes.io/projected/0335cb72-f006-46d4-a731-46677bc4b11c-kube-api-access-cqvb5\") pod \"crc-debug-sfrqt\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " 
pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:21 crc kubenswrapper[4852]: I0129 13:16:21.949610 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:22 crc kubenswrapper[4852]: I0129 13:16:22.571128 4852 generic.go:334] "Generic (PLEG): container finished" podID="0335cb72-f006-46d4-a731-46677bc4b11c" containerID="5727f4b0c045b6a3210661f5d834bf708d9a8c67ffaa80ab8a1fc1409aaa6ddb" exitCode=1 Jan 29 13:16:22 crc kubenswrapper[4852]: I0129 13:16:22.571225 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" event={"ID":"0335cb72-f006-46d4-a731-46677bc4b11c","Type":"ContainerDied","Data":"5727f4b0c045b6a3210661f5d834bf708d9a8c67ffaa80ab8a1fc1409aaa6ddb"} Jan 29 13:16:22 crc kubenswrapper[4852]: I0129 13:16:22.571483 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" event={"ID":"0335cb72-f006-46d4-a731-46677bc4b11c","Type":"ContainerStarted","Data":"dd5249ff52c7f2d44f0850209997e8cdfa8941e919a87bd91c3c6a51ad37925c"} Jan 29 13:16:22 crc kubenswrapper[4852]: I0129 13:16:22.617358 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w4bvc/crc-debug-sfrqt"] Jan 29 13:16:22 crc kubenswrapper[4852]: I0129 13:16:22.628490 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w4bvc/crc-debug-sfrqt"] Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.728606 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.764906 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqvb5\" (UniqueName: \"kubernetes.io/projected/0335cb72-f006-46d4-a731-46677bc4b11c-kube-api-access-cqvb5\") pod \"0335cb72-f006-46d4-a731-46677bc4b11c\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.764990 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0335cb72-f006-46d4-a731-46677bc4b11c-host\") pod \"0335cb72-f006-46d4-a731-46677bc4b11c\" (UID: \"0335cb72-f006-46d4-a731-46677bc4b11c\") " Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.765178 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0335cb72-f006-46d4-a731-46677bc4b11c-host" (OuterVolumeSpecName: "host") pod "0335cb72-f006-46d4-a731-46677bc4b11c" (UID: "0335cb72-f006-46d4-a731-46677bc4b11c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.765762 4852 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0335cb72-f006-46d4-a731-46677bc4b11c-host\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.770875 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0335cb72-f006-46d4-a731-46677bc4b11c-kube-api-access-cqvb5" (OuterVolumeSpecName: "kube-api-access-cqvb5") pod "0335cb72-f006-46d4-a731-46677bc4b11c" (UID: "0335cb72-f006-46d4-a731-46677bc4b11c"). InnerVolumeSpecName "kube-api-access-cqvb5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4852]: I0129 13:16:23.868507 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqvb5\" (UniqueName: \"kubernetes.io/projected/0335cb72-f006-46d4-a731-46677bc4b11c-kube-api-access-cqvb5\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4852]: I0129 13:16:24.595358 4852 scope.go:117] "RemoveContainer" containerID="5727f4b0c045b6a3210661f5d834bf708d9a8c67ffaa80ab8a1fc1409aaa6ddb" Jan 29 13:16:24 crc kubenswrapper[4852]: I0129 13:16:24.595391 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/crc-debug-sfrqt" Jan 29 13:16:25 crc kubenswrapper[4852]: I0129 13:16:25.477928 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0335cb72-f006-46d4-a731-46677bc4b11c" path="/var/lib/kubelet/pods/0335cb72-f006-46d4-a731-46677bc4b11c/volumes" Jan 29 13:16:32 crc kubenswrapper[4852]: I0129 13:16:32.463319 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:16:32 crc kubenswrapper[4852]: E0129 13:16:32.464142 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:16:46 crc kubenswrapper[4852]: I0129 13:16:46.463892 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:16:46 crc kubenswrapper[4852]: E0129 13:16:46.464832 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:17:01 crc kubenswrapper[4852]: I0129 13:17:01.463804 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:17:01 crc kubenswrapper[4852]: E0129 13:17:01.464728 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:17:13 crc kubenswrapper[4852]: I0129 13:17:13.471126 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:17:13 crc kubenswrapper[4852]: E0129 13:17:13.472052 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:17:26 crc kubenswrapper[4852]: I0129 13:17:26.463835 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:17:26 crc kubenswrapper[4852]: E0129 13:17:26.464756 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.307520 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mrz4g"] Jan 29 13:17:28 crc kubenswrapper[4852]: E0129 13:17:28.308444 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0335cb72-f006-46d4-a731-46677bc4b11c" containerName="container-00" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.308461 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="0335cb72-f006-46d4-a731-46677bc4b11c" containerName="container-00" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.308756 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="0335cb72-f006-46d4-a731-46677bc4b11c" containerName="container-00" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.310911 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.341505 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mrz4g"] Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.437299 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-utilities\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.437371 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2htrm\" (UniqueName: \"kubernetes.io/projected/04614b81-4589-4b4b-867c-c6d019a5e46d-kube-api-access-2htrm\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.437437 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-catalog-content\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.539456 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-utilities\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: 
I0129 13:17:28.539514 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2htrm\" (UniqueName: \"kubernetes.io/projected/04614b81-4589-4b4b-867c-c6d019a5e46d-kube-api-access-2htrm\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.539540 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-catalog-content\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.540063 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-utilities\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.540198 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-catalog-content\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.559343 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2htrm\" (UniqueName: \"kubernetes.io/projected/04614b81-4589-4b4b-867c-c6d019a5e46d-kube-api-access-2htrm\") pod \"redhat-operators-mrz4g\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:28 crc kubenswrapper[4852]: I0129 13:17:28.642888 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:29 crc kubenswrapper[4852]: I0129 13:17:29.407323 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mrz4g"] Jan 29 13:17:30 crc kubenswrapper[4852]: I0129 13:17:30.355505 4852 generic.go:334] "Generic (PLEG): container finished" podID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerID="fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d" exitCode=0 Jan 29 13:17:30 crc kubenswrapper[4852]: I0129 13:17:30.355572 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerDied","Data":"fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d"} Jan 29 13:17:30 crc kubenswrapper[4852]: I0129 13:17:30.355800 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerStarted","Data":"2cf62c6e1bb589c4708afdad22d4d0efba1b1074298a04eb443020c1fa8cd408"} Jan 29 13:17:30 crc kubenswrapper[4852]: I0129 13:17:30.358543 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:17:31 crc kubenswrapper[4852]: I0129 13:17:31.368459 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerStarted","Data":"3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269"} Jan 29 13:17:38 crc kubenswrapper[4852]: I0129 13:17:38.451196 4852 generic.go:334] "Generic (PLEG): container finished" podID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerID="3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269" exitCode=0 Jan 29 13:17:38 crc kubenswrapper[4852]: I0129 13:17:38.451726 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerDied","Data":"3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269"} Jan 29 13:17:40 crc kubenswrapper[4852]: I0129 13:17:40.463808 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:17:40 crc kubenswrapper[4852]: E0129 13:17:40.464819 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:17:40 crc kubenswrapper[4852]: I0129 13:17:40.476394 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerStarted","Data":"d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb"} Jan 29 13:17:40 crc kubenswrapper[4852]: I0129 13:17:40.514483 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mrz4g" podStartSLOduration=3.676138821 podStartE2EDuration="12.514453476s" podCreationTimestamp="2026-01-29 13:17:28 +0000 UTC" firstStartedPulling="2026-01-29 13:17:30.358152544 
+0000 UTC m=+9347.575483708" lastFinishedPulling="2026-01-29 13:17:39.196467209 +0000 UTC m=+9356.413798363" observedRunningTime="2026-01-29 13:17:40.500869005 +0000 UTC m=+9357.718200149" watchObservedRunningTime="2026-01-29 13:17:40.514453476 +0000 UTC m=+9357.731784650" Jan 29 13:17:48 crc kubenswrapper[4852]: I0129 13:17:48.644022 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:48 crc kubenswrapper[4852]: I0129 13:17:48.644830 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:48 crc kubenswrapper[4852]: I0129 13:17:48.701055 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:49 crc kubenswrapper[4852]: I0129 13:17:49.637281 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:49 crc kubenswrapper[4852]: I0129 13:17:49.700958 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mrz4g"] Jan 29 13:17:51 crc kubenswrapper[4852]: I0129 13:17:51.599010 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mrz4g" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="registry-server" containerID="cri-o://d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb" gracePeriod=2 Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.253720 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.322508 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-utilities\") pod \"04614b81-4589-4b4b-867c-c6d019a5e46d\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.322698 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2htrm\" (UniqueName: \"kubernetes.io/projected/04614b81-4589-4b4b-867c-c6d019a5e46d-kube-api-access-2htrm\") pod \"04614b81-4589-4b4b-867c-c6d019a5e46d\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.322755 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-catalog-content\") pod \"04614b81-4589-4b4b-867c-c6d019a5e46d\" (UID: \"04614b81-4589-4b4b-867c-c6d019a5e46d\") " Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.324453 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-utilities" (OuterVolumeSpecName: "utilities") pod "04614b81-4589-4b4b-867c-c6d019a5e46d" (UID: "04614b81-4589-4b4b-867c-c6d019a5e46d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.331255 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04614b81-4589-4b4b-867c-c6d019a5e46d-kube-api-access-2htrm" (OuterVolumeSpecName: "kube-api-access-2htrm") pod "04614b81-4589-4b4b-867c-c6d019a5e46d" (UID: "04614b81-4589-4b4b-867c-c6d019a5e46d"). InnerVolumeSpecName "kube-api-access-2htrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.426381 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.426424 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2htrm\" (UniqueName: \"kubernetes.io/projected/04614b81-4589-4b4b-867c-c6d019a5e46d-kube-api-access-2htrm\") on node \"crc\" DevicePath \"\"" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.455893 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04614b81-4589-4b4b-867c-c6d019a5e46d" (UID: "04614b81-4589-4b4b-867c-c6d019a5e46d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.531362 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04614b81-4589-4b4b-867c-c6d019a5e46d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.611884 4852 generic.go:334] "Generic (PLEG): container finished" podID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerID="d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb" exitCode=0 Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.611930 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerDied","Data":"d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb"} Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.611969 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrz4g" event={"ID":"04614b81-4589-4b4b-867c-c6d019a5e46d","Type":"ContainerDied","Data":"2cf62c6e1bb589c4708afdad22d4d0efba1b1074298a04eb443020c1fa8cd408"} Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.611989 4852 scope.go:117] "RemoveContainer" containerID="d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.612042 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mrz4g" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.639512 4852 scope.go:117] "RemoveContainer" containerID="3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.660464 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mrz4g"] Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.669952 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mrz4g"] Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.675262 4852 scope.go:117] "RemoveContainer" containerID="fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.741388 4852 scope.go:117] "RemoveContainer" containerID="d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb" Jan 29 13:17:52 crc kubenswrapper[4852]: E0129 13:17:52.742137 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb\": container with ID starting with d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb not found: ID does not exist" containerID="d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.742166 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb"} err="failed to get container status \"d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb\": rpc error: code = NotFound desc = could not find container \"d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb\": container with ID starting with d347551d22cc7d22d2f8656b6a8a58ebcc58bbd838298b99b647602ee63c66eb not found: ID does not exist" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.742187 4852 scope.go:117] "RemoveContainer" containerID="3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269" Jan 29 13:17:52 crc kubenswrapper[4852]: E0129 13:17:52.748246 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269\": container with ID starting with 3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269 not found: ID does not exist" containerID="3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.748295 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269"} err="failed to get container status \"3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269\": rpc error: code = NotFound desc = could not find container \"3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269\": container with ID starting with 3bf0438004c55da6509168446e091c949333e2fd29e5dd9d7f0330aa53db4269 not found: ID does not exist" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.748323 4852 scope.go:117] "RemoveContainer" containerID="fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d" Jan 29 13:17:52 crc kubenswrapper[4852]: E0129 13:17:52.751938 4852 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d\": container with ID starting with fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d not found: ID does not exist" containerID="fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d" Jan 29 13:17:52 crc kubenswrapper[4852]: I0129 13:17:52.751979 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d"} err="failed to get container status \"fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d\": rpc error: code = NotFound desc = could not find container \"fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d\": container with ID starting with fbfa310b79dfb85b304e47cc191a4f6a646d8a2748bea9f647caf5529180119d not found: ID does not exist" Jan 29 13:17:53 crc kubenswrapper[4852]: I0129 13:17:53.479274 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" path="/var/lib/kubelet/pods/04614b81-4589-4b4b-867c-c6d019a5e46d/volumes" Jan 29 13:17:55 crc kubenswrapper[4852]: I0129 13:17:55.463808 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:17:55 crc kubenswrapper[4852]: E0129 13:17:55.465747 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:18:06 crc kubenswrapper[4852]: I0129 13:18:06.463387 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:18:06 crc kubenswrapper[4852]: E0129 13:18:06.464635 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:18:17 crc kubenswrapper[4852]: I0129 13:18:17.463405 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:18:17 crc kubenswrapper[4852]: E0129 13:18:17.464090 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:18:32 crc kubenswrapper[4852]: I0129 13:18:32.463193 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:18:33 crc kubenswrapper[4852]: I0129 13:18:33.102389 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"0519e4d74bd6018d6d1ad5b1d3f65051d105abdbb4dda7840a35e918e71ff13b"} Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.424052 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z6lvk"] Jan 29 13:19:00 crc kubenswrapper[4852]: E0129 13:19:00.425187 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="extract-utilities" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.425201 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="extract-utilities" Jan 29 13:19:00 crc kubenswrapper[4852]: E0129 13:19:00.425241 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="extract-content" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.425247 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="extract-content" Jan 29 13:19:00 crc kubenswrapper[4852]: E0129 13:19:00.425265 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="registry-server" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.425270 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="registry-server" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.425465 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="04614b81-4589-4b4b-867c-c6d019a5e46d" containerName="registry-server" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.427515 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.437051 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6lvk"] Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.559015 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-utilities\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.559721 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvlk8\" (UniqueName: \"kubernetes.io/projected/7125b355-869a-48da-842b-347a1c382d91-kube-api-access-gvlk8\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.560193 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-catalog-content\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.662654 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-catalog-content\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.662779 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-utilities\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.662856 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvlk8\" (UniqueName: \"kubernetes.io/projected/7125b355-869a-48da-842b-347a1c382d91-kube-api-access-gvlk8\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.663320 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-catalog-content\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.663653 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-utilities\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.690181 4852 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gvlk8\" (UniqueName: \"kubernetes.io/projected/7125b355-869a-48da-842b-347a1c382d91-kube-api-access-gvlk8\") pod \"redhat-marketplace-z6lvk\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:00 crc kubenswrapper[4852]: I0129 13:19:00.757689 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:01 crc kubenswrapper[4852]: I0129 13:19:01.351493 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6lvk"] Jan 29 13:19:01 crc kubenswrapper[4852]: I0129 13:19:01.414437 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerStarted","Data":"709e24018cb325e25cd0a7a2a44a8b3bb351451e52b8a69ed41ac08f3933001c"} Jan 29 13:19:02 crc kubenswrapper[4852]: I0129 13:19:02.441981 4852 generic.go:334] "Generic (PLEG): container finished" podID="7125b355-869a-48da-842b-347a1c382d91" containerID="ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403" exitCode=0 Jan 29 13:19:02 crc kubenswrapper[4852]: I0129 13:19:02.442610 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerDied","Data":"ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403"} Jan 29 13:19:04 crc kubenswrapper[4852]: I0129 13:19:04.463655 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerStarted","Data":"56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c"} Jan 29 13:19:05 crc kubenswrapper[4852]: I0129 13:19:05.474290 4852 generic.go:334] "Generic (PLEG): container finished" podID="7125b355-869a-48da-842b-347a1c382d91" containerID="56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c" exitCode=0 Jan 29 13:19:05 crc kubenswrapper[4852]: I0129 13:19:05.483274 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerDied","Data":"56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c"} Jan 29 13:19:07 crc kubenswrapper[4852]: I0129 13:19:07.503483 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerStarted","Data":"9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096"} Jan 29 13:19:07 crc kubenswrapper[4852]: I0129 13:19:07.537549 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z6lvk" podStartSLOduration=4.034106387 podStartE2EDuration="7.537522909s" podCreationTimestamp="2026-01-29 13:19:00 +0000 UTC" firstStartedPulling="2026-01-29 13:19:02.448363555 +0000 UTC m=+9439.665694729" lastFinishedPulling="2026-01-29 13:19:05.951780077 +0000 UTC m=+9443.169111251" observedRunningTime="2026-01-29 13:19:07.524110432 +0000 UTC m=+9444.741441576" watchObservedRunningTime="2026-01-29 13:19:07.537522909 +0000 UTC m=+9444.754854053" Jan 29 13:19:10 crc kubenswrapper[4852]: I0129 13:19:10.758147 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:10 crc kubenswrapper[4852]: I0129 13:19:10.760705 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:10 crc kubenswrapper[4852]: I0129 13:19:10.831218 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:11 crc kubenswrapper[4852]: I0129 13:19:11.645663 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:11 crc kubenswrapper[4852]: I0129 13:19:11.698638 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6lvk"] Jan 29 13:19:13 crc kubenswrapper[4852]: I0129 13:19:13.578456 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z6lvk" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="registry-server" containerID="cri-o://9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096" gracePeriod=2 Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.166668 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.198206 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-utilities\") pod \"7125b355-869a-48da-842b-347a1c382d91\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.198265 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-catalog-content\") pod \"7125b355-869a-48da-842b-347a1c382d91\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.198308 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvlk8\" (UniqueName: \"kubernetes.io/projected/7125b355-869a-48da-842b-347a1c382d91-kube-api-access-gvlk8\") pod \"7125b355-869a-48da-842b-347a1c382d91\" (UID: \"7125b355-869a-48da-842b-347a1c382d91\") " Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.200160 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-utilities" (OuterVolumeSpecName: "utilities") pod "7125b355-869a-48da-842b-347a1c382d91" (UID: "7125b355-869a-48da-842b-347a1c382d91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.208520 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7125b355-869a-48da-842b-347a1c382d91-kube-api-access-gvlk8" (OuterVolumeSpecName: "kube-api-access-gvlk8") pod "7125b355-869a-48da-842b-347a1c382d91" (UID: "7125b355-869a-48da-842b-347a1c382d91"). InnerVolumeSpecName "kube-api-access-gvlk8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.227393 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7125b355-869a-48da-842b-347a1c382d91" (UID: "7125b355-869a-48da-842b-347a1c382d91"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.301286 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.301323 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7125b355-869a-48da-842b-347a1c382d91-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.301336 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvlk8\" (UniqueName: \"kubernetes.io/projected/7125b355-869a-48da-842b-347a1c382d91-kube-api-access-gvlk8\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.594648 4852 generic.go:334] "Generic (PLEG): container finished" podID="7125b355-869a-48da-842b-347a1c382d91" containerID="9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096" exitCode=0 Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.594746 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerDied","Data":"9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096"} Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.594797 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6lvk" event={"ID":"7125b355-869a-48da-842b-347a1c382d91","Type":"ContainerDied","Data":"709e24018cb325e25cd0a7a2a44a8b3bb351451e52b8a69ed41ac08f3933001c"} Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.594827 4852 scope.go:117] "RemoveContainer" containerID="9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.594862 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6lvk" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.644818 4852 scope.go:117] "RemoveContainer" containerID="56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.664543 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6lvk"] Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.674302 4852 scope.go:117] "RemoveContainer" containerID="ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.679957 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6lvk"] Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.755443 4852 scope.go:117] "RemoveContainer" containerID="9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096" Jan 29 13:19:14 crc kubenswrapper[4852]: E0129 13:19:14.756899 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096\": container with ID starting with 9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096 not found: ID does not exist" containerID="9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.756946 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096"} err="failed to get container status \"9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096\": rpc error: code = NotFound desc = could not find container \"9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096\": container with ID starting with 9f33644cdf2e7360f025107b732c451715fc62a76c07811091d52aed63019096 not found: ID does not exist" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.756997 4852 scope.go:117] "RemoveContainer" containerID="56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c" Jan 29 13:19:14 crc kubenswrapper[4852]: E0129 13:19:14.757867 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c\": container with ID starting with 56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c not found: ID does not exist" containerID="56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.757926 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c"} err="failed to get container status \"56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c\": rpc error: code = NotFound desc = could not find container \"56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c\": container with ID starting with 56bb298615b033997a8ef9af24755aed1156c95ae59bf6e9859789d2516c412c not found: ID does not exist" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.757958 4852 scope.go:117] "RemoveContainer" containerID="ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403" Jan 29 13:19:14 crc kubenswrapper[4852]: E0129 13:19:14.758348 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403\": container with ID starting with ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403 not found: ID does not exist" containerID="ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403" Jan 29 13:19:14 crc kubenswrapper[4852]: I0129 13:19:14.758376 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403"} err="failed to get container status \"ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403\": rpc error: code = NotFound desc = could not find container \"ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403\": container with ID starting with ef0909c5a8e7df986ae42d31d810c4da5c17810b1e1d75201ac3b45f128d2403 not found: ID does not exist" Jan 29 13:19:15 crc kubenswrapper[4852]: I0129 13:19:15.477379 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7125b355-869a-48da-842b-347a1c382d91" path="/var/lib/kubelet/pods/7125b355-869a-48da-842b-347a1c382d91/volumes" Jan 29 13:21:00 crc kubenswrapper[4852]: I0129 13:21:00.017494 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:21:00 crc kubenswrapper[4852]: I0129 13:21:00.018128 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:21:30 crc kubenswrapper[4852]: I0129 13:21:30.016815 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:21:30 crc kubenswrapper[4852]: I0129 13:21:30.017453 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.017767 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.018680 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.018768 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.020147 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0519e4d74bd6018d6d1ad5b1d3f65051d105abdbb4dda7840a35e918e71ff13b"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.020229 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://0519e4d74bd6018d6d1ad5b1d3f65051d105abdbb4dda7840a35e918e71ff13b" gracePeriod=600 Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.603164 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="0519e4d74bd6018d6d1ad5b1d3f65051d105abdbb4dda7840a35e918e71ff13b" exitCode=0 Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.603228 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"0519e4d74bd6018d6d1ad5b1d3f65051d105abdbb4dda7840a35e918e71ff13b"} Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.603873 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa"} Jan 29 13:22:00 crc kubenswrapper[4852]: I0129 13:22:00.603903 4852 scope.go:117] "RemoveContainer" containerID="4ee771e8694ab2d277f0d8549a2740314e3c0517377a652d69a0634d0752fab8" Jan 29 13:22:06 crc kubenswrapper[4852]: I0129 13:22:06.791943 4852 scope.go:117] "RemoveContainer" containerID="aa36fd9413902fe5e0dce7036c0bc154941f796d0e85df24ed7f46e02b38b8b9" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.808447 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4dv5k"] Jan 29 13:22:20 crc kubenswrapper[4852]: E0129 13:22:20.810159 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="extract-utilities" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.810211 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="extract-utilities" Jan 29 13:22:20 crc kubenswrapper[4852]: E0129 13:22:20.810226 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="extract-content" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.810236 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="extract-content" Jan 29 13:22:20 crc kubenswrapper[4852]: E0129 13:22:20.810305 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="registry-server" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.810356 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="7125b355-869a-48da-842b-347a1c382d91" 
containerName="registry-server" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.810821 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="7125b355-869a-48da-842b-347a1c382d91" containerName="registry-server" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.819039 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.855616 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4dv5k"] Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.973360 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-utilities\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.973529 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-catalog-content\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:20 crc kubenswrapper[4852]: I0129 13:22:20.973648 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w464m\" (UniqueName: \"kubernetes.io/projected/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-kube-api-access-w464m\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.075388 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-utilities\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.075698 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-catalog-content\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.076125 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-utilities\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.076227 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-catalog-content\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.076462 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-w464m\" (UniqueName: \"kubernetes.io/projected/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-kube-api-access-w464m\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.115626 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w464m\" (UniqueName: \"kubernetes.io/projected/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-kube-api-access-w464m\") pod \"certified-operators-4dv5k\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.151023 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.688640 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4dv5k"] Jan 29 13:22:21 crc kubenswrapper[4852]: I0129 13:22:21.874225 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerStarted","Data":"47eef8ce4a88f5f5498f6f7f7b9ad088b95e3ef5df4268fbb2cd6d9f5313761a"} Jan 29 13:22:22 crc kubenswrapper[4852]: I0129 13:22:22.912449 4852 generic.go:334] "Generic (PLEG): container finished" podID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerID="e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc" exitCode=0 Jan 29 13:22:22 crc kubenswrapper[4852]: I0129 13:22:22.912763 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerDied","Data":"e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc"} Jan 29 13:22:23 crc kubenswrapper[4852]: I0129 13:22:23.927769 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerStarted","Data":"0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14"} Jan 29 13:22:25 crc kubenswrapper[4852]: I0129 13:22:25.959807 4852 generic.go:334] "Generic (PLEG): container finished" podID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerID="0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14" exitCode=0 Jan 29 13:22:25 crc kubenswrapper[4852]: I0129 13:22:25.959983 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerDied","Data":"0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14"} Jan 29 13:22:26 crc kubenswrapper[4852]: I0129 13:22:26.984635 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerStarted","Data":"8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e"} Jan 29 13:22:27 crc kubenswrapper[4852]: I0129 13:22:27.010456 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4dv5k" podStartSLOduration=3.528181796 podStartE2EDuration="7.010430259s" podCreationTimestamp="2026-01-29 13:22:20 +0000 UTC" firstStartedPulling="2026-01-29 13:22:22.916096577 +0000 UTC m=+9640.133427711" 
lastFinishedPulling="2026-01-29 13:22:26.398345 +0000 UTC m=+9643.615676174" observedRunningTime="2026-01-29 13:22:26.998804796 +0000 UTC m=+9644.216135950" watchObservedRunningTime="2026-01-29 13:22:27.010430259 +0000 UTC m=+9644.227761423" Jan 29 13:22:31 crc kubenswrapper[4852]: I0129 13:22:31.151381 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:31 crc kubenswrapper[4852]: I0129 13:22:31.152085 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:31 crc kubenswrapper[4852]: I0129 13:22:31.198150 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:32 crc kubenswrapper[4852]: I0129 13:22:32.089112 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:32 crc kubenswrapper[4852]: I0129 13:22:32.158034 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4dv5k"] Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.062951 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4dv5k" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="registry-server" containerID="cri-o://8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e" gracePeriod=2 Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.618439 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.732106 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-catalog-content\") pod \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.732246 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w464m\" (UniqueName: \"kubernetes.io/projected/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-kube-api-access-w464m\") pod \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.732443 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-utilities\") pod \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\" (UID: \"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57\") " Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.733515 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-utilities" (OuterVolumeSpecName: "utilities") pod "ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" (UID: "ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.743880 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-kube-api-access-w464m" (OuterVolumeSpecName: "kube-api-access-w464m") pod "ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" (UID: "ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57"). InnerVolumeSpecName "kube-api-access-w464m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.790136 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" (UID: "ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.835214 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w464m\" (UniqueName: \"kubernetes.io/projected/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-kube-api-access-w464m\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.835246 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:34 crc kubenswrapper[4852]: I0129 13:22:34.835255 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.077992 4852 generic.go:334] "Generic (PLEG): container finished" podID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerID="8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e" exitCode=0 Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.078045 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerDied","Data":"8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e"} Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.078079 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dv5k" event={"ID":"ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57","Type":"ContainerDied","Data":"47eef8ce4a88f5f5498f6f7f7b9ad088b95e3ef5df4268fbb2cd6d9f5313761a"} Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.078098 4852 scope.go:117] "RemoveContainer" containerID="8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.078119 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4dv5k" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.109599 4852 scope.go:117] "RemoveContainer" containerID="0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.142350 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4dv5k"] Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.159003 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4dv5k"] Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.166481 4852 scope.go:117] "RemoveContainer" containerID="e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.202828 4852 scope.go:117] "RemoveContainer" containerID="8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e" Jan 29 13:22:35 crc kubenswrapper[4852]: E0129 13:22:35.203410 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e\": container with ID starting with 8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e not found: ID does not exist" containerID="8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.203451 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e"} err="failed to get container status \"8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e\": rpc error: code = NotFound desc = could not find container \"8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e\": container with ID starting with 8e58967e53e5faeeebcc93df134c5f745e0b9d0fcef0aaa5b3f2d17dcaaf0c8e not found: ID does not exist" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.203477 4852 scope.go:117] "RemoveContainer" containerID="0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14" Jan 29 13:22:35 crc kubenswrapper[4852]: E0129 13:22:35.203766 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14\": container with ID starting with 0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14 not found: ID does not exist" containerID="0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.203796 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14"} err="failed to get container status \"0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14\": rpc error: code = NotFound desc = could not find container \"0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14\": container with ID starting with 0dccee2c8bb9f95a858e6e2525a410a63893d938d745f6c07130f363daba7d14 not found: ID does not exist" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.203811 4852 scope.go:117] "RemoveContainer" containerID="e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc" Jan 29 13:22:35 crc kubenswrapper[4852]: E0129 13:22:35.204100 4852 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc\": container with ID starting with e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc not found: ID does not exist" containerID="e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.204133 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc"} err="failed to get container status \"e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc\": rpc error: code = NotFound desc = could not find container \"e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc\": container with ID starting with e8430bc00aaf5f0b339b8788a1974a4b405566ea085d0c87fba9aad8398850fc not found: ID does not exist" Jan 29 13:22:35 crc kubenswrapper[4852]: I0129 13:22:35.478188 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" path="/var/lib/kubelet/pods/ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57/volumes" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.553577 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r7t6w"] Jan 29 13:23:05 crc kubenswrapper[4852]: E0129 13:23:05.557214 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="extract-utilities" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.557323 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="extract-utilities" Jan 29 13:23:05 crc kubenswrapper[4852]: E0129 13:23:05.557411 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="registry-server" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.557540 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="registry-server" Jan 29 13:23:05 crc kubenswrapper[4852]: E0129 13:23:05.557640 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="extract-content" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.557705 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="extract-content" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.557987 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee7ae45a-f233-44a4-a32f-6cc2b8ee5a57" containerName="registry-server" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.559559 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.576302 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7t6w"] Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.593883 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-catalog-content\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.594000 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l6kf\" (UniqueName: \"kubernetes.io/projected/b69be642-e330-43a2-bd81-2358e0d2369a-kube-api-access-9l6kf\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.594044 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-utilities\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.696269 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l6kf\" (UniqueName: \"kubernetes.io/projected/b69be642-e330-43a2-bd81-2358e0d2369a-kube-api-access-9l6kf\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.696346 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-utilities\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.696850 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-utilities\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.697192 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-catalog-content\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.697451 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-catalog-content\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.859860 4852 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9l6kf\" (UniqueName: \"kubernetes.io/projected/b69be642-e330-43a2-bd81-2358e0d2369a-kube-api-access-9l6kf\") pod \"community-operators-r7t6w\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:05 crc kubenswrapper[4852]: I0129 13:23:05.889300 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:06 crc kubenswrapper[4852]: I0129 13:23:06.513903 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7t6w"] Jan 29 13:23:07 crc kubenswrapper[4852]: I0129 13:23:07.495328 4852 generic.go:334] "Generic (PLEG): container finished" podID="b69be642-e330-43a2-bd81-2358e0d2369a" containerID="b2d8eb968fd2aa3456266e22afe726f95b27d9f51050517bd5a2fb05a8e6e778" exitCode=0 Jan 29 13:23:07 crc kubenswrapper[4852]: I0129 13:23:07.495378 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerDied","Data":"b2d8eb968fd2aa3456266e22afe726f95b27d9f51050517bd5a2fb05a8e6e778"} Jan 29 13:23:07 crc kubenswrapper[4852]: I0129 13:23:07.495971 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerStarted","Data":"805931ba4aaa73f42769616de080c058a95ff4ca5f1bf858ef4998222b1f8728"} Jan 29 13:23:07 crc kubenswrapper[4852]: I0129 13:23:07.498898 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:23:09 crc kubenswrapper[4852]: I0129 13:23:09.527330 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerStarted","Data":"e2bf98532ab48518cde3cd02e5ddd69aad9df58fa5a1f35c2b8352c74dca0702"} Jan 29 13:23:10 crc kubenswrapper[4852]: I0129 13:23:10.540136 4852 generic.go:334] "Generic (PLEG): container finished" podID="b69be642-e330-43a2-bd81-2358e0d2369a" containerID="e2bf98532ab48518cde3cd02e5ddd69aad9df58fa5a1f35c2b8352c74dca0702" exitCode=0 Jan 29 13:23:10 crc kubenswrapper[4852]: I0129 13:23:10.540419 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerDied","Data":"e2bf98532ab48518cde3cd02e5ddd69aad9df58fa5a1f35c2b8352c74dca0702"} Jan 29 13:23:11 crc kubenswrapper[4852]: I0129 13:23:11.553449 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerStarted","Data":"564b860e992e478b245b3348090df6684fb5ca7faa2d066f34c0845016a77086"} Jan 29 13:23:11 crc kubenswrapper[4852]: I0129 13:23:11.583116 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r7t6w" podStartSLOduration=2.930584504 podStartE2EDuration="6.583095325s" podCreationTimestamp="2026-01-29 13:23:05 +0000 UTC" firstStartedPulling="2026-01-29 13:23:07.497846364 +0000 UTC m=+9684.715177538" lastFinishedPulling="2026-01-29 13:23:11.150357225 +0000 UTC m=+9688.367688359" observedRunningTime="2026-01-29 13:23:11.580267266 +0000 UTC m=+9688.797598480" watchObservedRunningTime="2026-01-29 
13:23:11.583095325 +0000 UTC m=+9688.800426459" Jan 29 13:23:15 crc kubenswrapper[4852]: I0129 13:23:15.889840 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:15 crc kubenswrapper[4852]: I0129 13:23:15.890624 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:15 crc kubenswrapper[4852]: I0129 13:23:15.971662 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:16 crc kubenswrapper[4852]: I0129 13:23:16.670632 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:16 crc kubenswrapper[4852]: I0129 13:23:16.725436 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r7t6w"] Jan 29 13:23:18 crc kubenswrapper[4852]: I0129 13:23:18.632970 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r7t6w" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="registry-server" containerID="cri-o://564b860e992e478b245b3348090df6684fb5ca7faa2d066f34c0845016a77086" gracePeriod=2 Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.646768 4852 generic.go:334] "Generic (PLEG): container finished" podID="b69be642-e330-43a2-bd81-2358e0d2369a" containerID="564b860e992e478b245b3348090df6684fb5ca7faa2d066f34c0845016a77086" exitCode=0 Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.646814 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerDied","Data":"564b860e992e478b245b3348090df6684fb5ca7faa2d066f34c0845016a77086"} Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.647334 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7t6w" event={"ID":"b69be642-e330-43a2-bd81-2358e0d2369a","Type":"ContainerDied","Data":"805931ba4aaa73f42769616de080c058a95ff4ca5f1bf858ef4998222b1f8728"} Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.647352 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="805931ba4aaa73f42769616de080c058a95ff4ca5f1bf858ef4998222b1f8728" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.672894 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.733020 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-utilities\") pod \"b69be642-e330-43a2-bd81-2358e0d2369a\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.733229 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9l6kf\" (UniqueName: \"kubernetes.io/projected/b69be642-e330-43a2-bd81-2358e0d2369a-kube-api-access-9l6kf\") pod \"b69be642-e330-43a2-bd81-2358e0d2369a\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.733326 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-catalog-content\") pod \"b69be642-e330-43a2-bd81-2358e0d2369a\" (UID: \"b69be642-e330-43a2-bd81-2358e0d2369a\") " Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.733896 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-utilities" (OuterVolumeSpecName: "utilities") pod "b69be642-e330-43a2-bd81-2358e0d2369a" (UID: "b69be642-e330-43a2-bd81-2358e0d2369a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.739225 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b69be642-e330-43a2-bd81-2358e0d2369a-kube-api-access-9l6kf" (OuterVolumeSpecName: "kube-api-access-9l6kf") pod "b69be642-e330-43a2-bd81-2358e0d2369a" (UID: "b69be642-e330-43a2-bd81-2358e0d2369a"). InnerVolumeSpecName "kube-api-access-9l6kf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.787699 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b69be642-e330-43a2-bd81-2358e0d2369a" (UID: "b69be642-e330-43a2-bd81-2358e0d2369a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.836430 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9l6kf\" (UniqueName: \"kubernetes.io/projected/b69be642-e330-43a2-bd81-2358e0d2369a-kube-api-access-9l6kf\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.836461 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:19 crc kubenswrapper[4852]: I0129 13:23:19.836490 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b69be642-e330-43a2-bd81-2358e0d2369a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:20 crc kubenswrapper[4852]: I0129 13:23:20.659964 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7t6w" Jan 29 13:23:20 crc kubenswrapper[4852]: I0129 13:23:20.712033 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r7t6w"] Jan 29 13:23:20 crc kubenswrapper[4852]: I0129 13:23:20.724212 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r7t6w"] Jan 29 13:23:21 crc kubenswrapper[4852]: I0129 13:23:21.481058 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" path="/var/lib/kubelet/pods/b69be642-e330-43a2-bd81-2358e0d2369a/volumes" Jan 29 13:24:00 crc kubenswrapper[4852]: I0129 13:24:00.017255 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:24:00 crc kubenswrapper[4852]: I0129 13:24:00.018119 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:24:30 crc kubenswrapper[4852]: I0129 13:24:30.017358 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:24:30 crc kubenswrapper[4852]: I0129 13:24:30.019296 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:25:00 crc kubenswrapper[4852]: I0129 13:25:00.016841 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:25:00 crc kubenswrapper[4852]: I0129 13:25:00.017777 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:25:00 crc kubenswrapper[4852]: I0129 13:25:00.017904 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" Jan 29 13:25:00 crc kubenswrapper[4852]: I0129 13:25:00.019624 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be 
restarted" Jan 29 13:25:00 crc kubenswrapper[4852]: I0129 13:25:00.019778 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" gracePeriod=600 Jan 29 13:25:00 crc kubenswrapper[4852]: E0129 13:25:00.157152 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:25:01 crc kubenswrapper[4852]: I0129 13:25:01.125838 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" exitCode=0 Jan 29 13:25:01 crc kubenswrapper[4852]: I0129 13:25:01.125961 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa"} Jan 29 13:25:01 crc kubenswrapper[4852]: I0129 13:25:01.126235 4852 scope.go:117] "RemoveContainer" containerID="0519e4d74bd6018d6d1ad5b1d3f65051d105abdbb4dda7840a35e918e71ff13b" Jan 29 13:25:01 crc kubenswrapper[4852]: I0129 13:25:01.126850 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:25:01 crc kubenswrapper[4852]: E0129 13:25:01.127130 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:25:14 crc kubenswrapper[4852]: I0129 13:25:14.463265 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:25:14 crc kubenswrapper[4852]: E0129 13:25:14.464137 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:25:28 crc kubenswrapper[4852]: I0129 13:25:28.463892 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:25:28 crc kubenswrapper[4852]: E0129 13:25:28.465016 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:25:35 crc kubenswrapper[4852]: I0129 13:25:35.781629 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_8da49752-6061-42b7-b410-7ce385b6a075/init-config-reloader/0.log" Jan 29 13:25:35 crc kubenswrapper[4852]: I0129 13:25:35.993237 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_8da49752-6061-42b7-b410-7ce385b6a075/init-config-reloader/0.log" Jan 29 13:25:36 crc kubenswrapper[4852]: I0129 13:25:36.020467 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_8da49752-6061-42b7-b410-7ce385b6a075/alertmanager/0.log" Jan 29 13:25:36 crc kubenswrapper[4852]: I0129 13:25:36.063431 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_8da49752-6061-42b7-b410-7ce385b6a075/config-reloader/0.log" Jan 29 13:25:36 crc kubenswrapper[4852]: I0129 13:25:36.230058 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e385c621-ad37-4274-90fd-0d71c5e7ddd3/aodh-api/0.log" Jan 29 13:25:36 crc kubenswrapper[4852]: I0129 13:25:36.334495 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e385c621-ad37-4274-90fd-0d71c5e7ddd3/aodh-evaluator/0.log" Jan 29 13:25:36 crc kubenswrapper[4852]: I0129 13:25:36.356760 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e385c621-ad37-4274-90fd-0d71c5e7ddd3/aodh-listener/0.log" Jan 29 13:25:36 crc kubenswrapper[4852]: I0129 13:25:36.433920 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e385c621-ad37-4274-90fd-0d71c5e7ddd3/aodh-notifier/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.081515 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6ffccc7596-b485b_2c7bcc2b-3201-4828-9f3a-44174ff6b77e/barbican-api-log/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.108174 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6ffccc7596-b485b_2c7bcc2b-3201-4828-9f3a-44174ff6b77e/barbican-api/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.351746 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-cddbf6bbd-2jh9z_8a17de25-6f02-4f68-88c8-36d3c1450821/barbican-keystone-listener-log/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.380319 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-cddbf6bbd-2jh9z_8a17de25-6f02-4f68-88c8-36d3c1450821/barbican-keystone-listener/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.424834 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5fff45d747-mx7dg_6d7f137b-b458-4df9-9c45-09c744fa6362/barbican-worker/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.542007 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5fff45d747-mx7dg_6d7f137b-b458-4df9-9c45-09c744fa6362/barbican-worker-log/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.620987 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-qt2w4_d212a77f-957e-4bc0-ae22-614da3a67d21/bootstrap-openstack-openstack-cell1/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.791808 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3/ceilometer-central-agent/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.870573 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3/proxy-httpd/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.907396 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3/ceilometer-notification-agent/0.log" Jan 29 13:25:37 crc kubenswrapper[4852]: I0129 13:25:37.960218 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_108f4beb-9e0e-4e7f-bf7d-7bcfe5959ce3/sg-core/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.099678 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-openstack-openstack-cell1-z622l_f5a52527-628d-427d-8edc-59dc40c51386/ceph-client-openstack-openstack-cell1/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.257239 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_664f10e3-cc63-4eb5-8474-0dec1f35b938/cinder-api/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.310130 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_664f10e3-cc63-4eb5-8474-0dec1f35b938/cinder-api-log/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.579955 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_65005606-1bf1-42ed-99fd-afc7949c61a0/cinder-backup/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.642704 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_65005606-1bf1-42ed-99fd-afc7949c61a0/probe/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.728285 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_f67f9eec-ad75-496b-ba55-f7773c45bff3/cinder-scheduler/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.831974 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_f67f9eec-ad75-496b-ba55-f7773c45bff3/probe/0.log" Jan 29 13:25:38 crc kubenswrapper[4852]: I0129 13:25:38.936443 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_396524c3-a9fc-4041-a7bf-86f088a7b1ea/cinder-volume/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.003719 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_396524c3-a9fc-4041-a7bf-86f088a7b1ea/probe/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.163684 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-wrb7z_7d79020b-902b-425d-ab7d-d73c666d7582/configure-network-openstack-openstack-cell1/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.226440 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-zw2j2_42cffd93-f8fd-4c04-b766-98fc38dcae2e/configure-os-openstack-openstack-cell1/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.343505 4852 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-645d55dd59-xntcj_15e0934d-7a6d-4db7-8faa-78a576e92e58/init/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.484987 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-645d55dd59-xntcj_15e0934d-7a6d-4db7-8faa-78a576e92e58/init/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.529674 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-hz8sq_767e03a5-76cc-49ce-a6b5-0c18616f0405/download-cache-openstack-openstack-cell1/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.551229 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-645d55dd59-xntcj_15e0934d-7a6d-4db7-8faa-78a576e92e58/dnsmasq-dns/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.735934 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_212d90fc-8389-41db-b187-e76812e4d072/glance-log/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.772504 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_212d90fc-8389-41db-b187-e76812e4d072/glance-httpd/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.865018 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_04a9fabd-cf4f-4357-8490-c232eada6b3a/glance-httpd/0.log" Jan 29 13:25:39 crc kubenswrapper[4852]: I0129 13:25:39.920254 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_04a9fabd-cf4f-4357-8490-c232eada6b3a/glance-log/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.103462 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-bdb79664c-nwljl_ac1eac8d-5358-4cc0-940f-36a201478932/heat-api/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.277247 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5fbdc7b9-2dk72_6bc0a69d-dab1-4196-8f3f-924b88ea5a79/heat-cfnapi/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.325958 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-5fbc98c975-bc5tz_1c8bf67c-6118-43f4-8de8-ee434145007c/heat-engine/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.468002 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-545cbcccb9-rs75k_1552da5b-1c75-4b36-9e71-ea8cd1c9af06/horizon/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.578642 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-b9std_24859cd7-165a-4b80-affd-fea7857b2d93/install-certs-openstack-openstack-cell1/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.607462 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-545cbcccb9-rs75k_1552da5b-1c75-4b36-9e71-ea8cd1c9af06/horizon-log/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.765778 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-v5scc_af8a99a6-9c2a-4944-8ec6-4817ebfde889/install-os-openstack-openstack-cell1/0.log" Jan 29 13:25:40 crc kubenswrapper[4852]: I0129 13:25:40.865207 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_keystone-cron-29494861-dcvxd_dbd0f679-8736-4fc7-a3e6-7b0d58e325e8/keystone-cron/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.064484 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7c85c65b76-hsllc_2a761271-dd86-465e-86af-c5ba36c7cb64/keystone-api/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.382202 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_68096b01-9bf9-4e74-9dc9-3521b1ea4fba/kube-state-metrics/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.396395 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-gqh6l_bd812255-d120-4225-98da-716373cacd08/libvirt-openstack-openstack-cell1/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.646188 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_cc7d9b76-36d6-435f-9999-e9e468154bda/manila-api-log/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.683401 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_cc7d9b76-36d6-435f-9999-e9e468154bda/manila-api/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.795375 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_0ec170b2-e22f-40a0-a407-e6b873103fc1/manila-scheduler/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.809527 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_0ec170b2-e22f-40a0-a407-e6b873103fc1/probe/0.log" Jan 29 13:25:41 crc kubenswrapper[4852]: I0129 13:25:41.877175 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_a8e48974-dfb6-4275-9c9e-0ff74a9f06d1/probe/0.log" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.004366 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_a8e48974-dfb6-4275-9c9e-0ff74a9f06d1/manila-share/0.log" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.402969 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-65cc64b9b9-ppj6r_38a3afdc-64d5-4c8b-ae4f-87d376c11ad1/neutron-httpd/0.log" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.427132 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-65cc64b9b9-ppj6r_38a3afdc-64d5-4c8b-ae4f-87d376c11ad1/neutron-api/0.log" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.463480 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:25:42 crc kubenswrapper[4852]: E0129 13:25:42.463745 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.523331 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-bvz6z_c0571917-6ca8-4aa5-b046-f20ff3909490/neutron-dhcp-openstack-openstack-cell1/0.log" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.720850 4852 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-kd7tl_98bc1451-81f8-41ed-8e6e-c56eca9ca0f1/neutron-metadata-openstack-openstack-cell1/0.log" Jan 29 13:25:42 crc kubenswrapper[4852]: I0129 13:25:42.789787 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-rpffn_82d924c3-a1a8-425d-8ccd-f83a7053b057/neutron-sriov-openstack-openstack-cell1/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.089733 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_8a752a8f-23da-4add-9dd1-3d765dd5440e/nova-api-api/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.148105 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_8a752a8f-23da-4add-9dd1-3d765dd5440e/nova-api-log/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.295487 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_f31f0e59-612f-4180-ab59-98f0e79985f0/nova-cell0-conductor-conductor/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.374060 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_36a67a7e-1743-4610-9fe3-cb6a394c70a2/nova-cell1-conductor-conductor/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.649711 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_5db2d6f5-d3f0-420f-bb9b-9a3d322d5d97/nova-cell1-novncproxy-novncproxy/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.659747 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celldwwhd_6a2c5425-b9b5-4030-9e85-4a935c5d0cd1/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Jan 29 13:25:43 crc kubenswrapper[4852]: I0129 13:25:43.869130 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-snxf7_90675cdc-8008-4d42-8f75-e0f67752eef4/nova-cell1-openstack-openstack-cell1/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.067664 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_3724deaf-3c35-42d5-8b3c-f7306b134b15/nova-metadata-log/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.081513 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_3724deaf-3c35-42d5-8b3c-f7306b134b15/nova-metadata-metadata/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.305796 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_c4f2ec21-7302-4fa0-8ceb-0a9cdfa15841/nova-scheduler-scheduler/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.310695 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-7679fffcf8-j62tz_2fe31de4-bde3-421f-92d2-c5cb8b3022fd/init/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.498946 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-7679fffcf8-j62tz_2fe31de4-bde3-421f-92d2-c5cb8b3022fd/init/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.560349 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-7679fffcf8-j62tz_2fe31de4-bde3-421f-92d2-c5cb8b3022fd/octavia-api-provider-agent/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.753755 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_octavia-healthmanager-hchhs_0334a3c1-e942-474f-8d03-eb17b89a609f/init/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.851836 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-7679fffcf8-j62tz_2fe31de4-bde3-421f-92d2-c5cb8b3022fd/octavia-api/0.log" Jan 29 13:25:44 crc kubenswrapper[4852]: I0129 13:25:44.941551 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-hchhs_0334a3c1-e942-474f-8d03-eb17b89a609f/init/0.log" Jan 29 13:25:45 crc kubenswrapper[4852]: I0129 13:25:45.011441 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-hchhs_0334a3c1-e942-474f-8d03-eb17b89a609f/octavia-healthmanager/0.log" Jan 29 13:25:45 crc kubenswrapper[4852]: I0129 13:25:45.099926 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-gqwgh_531f02fa-337f-4e2a-a7fd-6877aacd308d/init/0.log" Jan 29 13:25:45 crc kubenswrapper[4852]: I0129 13:25:45.272074 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-gqwgh_531f02fa-337f-4e2a-a7fd-6877aacd308d/init/0.log" Jan 29 13:25:45 crc kubenswrapper[4852]: I0129 13:25:45.376256 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-gqwgh_531f02fa-337f-4e2a-a7fd-6877aacd308d/octavia-housekeeping/0.log" Jan 29 13:25:45 crc kubenswrapper[4852]: I0129 13:25:45.379651 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-qw2nx_19b0136d-e079-4cce-95ce-530955f5929a/init/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.048085 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-qw2nx_19b0136d-e079-4cce-95ce-530955f5929a/init/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.052144 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-qw2nx_19b0136d-e079-4cce-95ce-530955f5929a/octavia-amphora-httpd/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.193965 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-xw2qz_65be4657-225c-4b92-b227-cabe5bf069e2/init/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.390710 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-xw2qz_65be4657-225c-4b92-b227-cabe5bf069e2/octavia-rsyslog/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.392865 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-xw2qz_65be4657-225c-4b92-b227-cabe5bf069e2/init/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.504506 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-zwdbx_45059069-1ab9-4564-b135-7c9720565139/init/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.785182 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-zwdbx_45059069-1ab9-4564-b135-7c9720565139/init/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.871170 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_9ae14cfa-d63c-4533-964e-4b87a973a38f/mysql-bootstrap/0.log" Jan 29 13:25:46 crc kubenswrapper[4852]: I0129 13:25:46.926918 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_octavia-worker-zwdbx_45059069-1ab9-4564-b135-7c9720565139/octavia-worker/0.log" Jan 29 13:25:47 crc kubenswrapper[4852]: I0129 13:25:47.022254 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_9ae14cfa-d63c-4533-964e-4b87a973a38f/mysql-bootstrap/0.log" Jan 29 13:25:47 crc kubenswrapper[4852]: I0129 13:25:47.057002 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_9ae14cfa-d63c-4533-964e-4b87a973a38f/galera/0.log" Jan 29 13:25:47 crc kubenswrapper[4852]: I0129 13:25:47.196725 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc3176db-9095-425e-ad2b-0fdcf60c6665/mysql-bootstrap/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.195912 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_b494fc9e-bf11-4ea2-a8aa-6c6df345f6b6/openstackclient/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.252214 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc3176db-9095-425e-ad2b-0fdcf60c6665/mysql-bootstrap/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.275279 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_cc3176db-9095-425e-ad2b-0fdcf60c6665/galera/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.446723 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6njt6_e97b86c6-6aa7-48e5-a225-a7c5abed3782/ovn-controller/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.533508 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-v55s7_c378ab9d-4549-4f89-84a2-9aad079bc575/openstack-network-exporter/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.721181 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-29mj5_7ff009d1-69d9-49a8-8fd2-caafaab09f52/ovsdb-server-init/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.893568 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-29mj5_7ff009d1-69d9-49a8-8fd2-caafaab09f52/ovsdb-server-init/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.920162 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-29mj5_7ff009d1-69d9-49a8-8fd2-caafaab09f52/ovs-vswitchd/0.log" Jan 29 13:25:48 crc kubenswrapper[4852]: I0129 13:25:48.948381 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-29mj5_7ff009d1-69d9-49a8-8fd2-caafaab09f52/ovsdb-server/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.169242 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_51e5b1b6-ed8d-4369-acea-ddc62c5a8945/ovn-northd/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.169965 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_51e5b1b6-ed8d-4369-acea-ddc62c5a8945/openstack-network-exporter/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.368940 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-khsth_19e6cd4d-7066-4b75-9b90-79226950ef2f/ovn-openstack-openstack-cell1/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.486436 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_d5582ad6-370b-4202-964b-511f90ec4b23/openstack-network-exporter/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.504462 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d5582ad6-370b-4202-964b-511f90ec4b23/ovsdbserver-nb/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.638996 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_a7067e5b-383f-4726-af68-1011d2bed65f/openstack-network-exporter/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.737095 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_a7067e5b-383f-4726-af68-1011d2bed65f/ovsdbserver-nb/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.892038 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_599ccb34-f477-4540-a025-c0dd9cde5861/openstack-network-exporter/0.log" Jan 29 13:25:49 crc kubenswrapper[4852]: I0129 13:25:49.942600 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_599ccb34-f477-4540-a025-c0dd9cde5861/ovsdbserver-nb/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.088228 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ccd95a41-97ff-4038-9343-17bb0198af4f/openstack-network-exporter/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.134438 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ccd95a41-97ff-4038-9343-17bb0198af4f/ovsdbserver-sb/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.247016 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_52bbe7af-9fd8-4653-b05c-edd036fc3c9e/openstack-network-exporter/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.383925 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_52bbe7af-9fd8-4653-b05c-edd036fc3c9e/ovsdbserver-sb/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.483765 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9/openstack-network-exporter/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.592296 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_29ba0ffa-0a78-4ef9-82cf-7230e36cf3c9/ovsdbserver-sb/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.807706 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7cfd49c966-l87cv_9c20bbfd-ef79-4793-bd6f-fe5bc90162d0/placement-api/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.837990 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7cfd49c966-l87cv_9c20bbfd-ef79-4793-bd6f-fe5bc90162d0/placement-log/0.log" Jan 29 13:25:50 crc kubenswrapper[4852]: I0129 13:25:50.967376 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-cfclr6_fe8d319f-c245-4fd4-87f3-2895eef499b9/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.089050 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5af19bd0-65c2-4a9b-9129-13c8d890652e/init-config-reloader/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 
13:25:51.418122 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5af19bd0-65c2-4a9b-9129-13c8d890652e/thanos-sidecar/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.422972 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5af19bd0-65c2-4a9b-9129-13c8d890652e/init-config-reloader/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.427546 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5af19bd0-65c2-4a9b-9129-13c8d890652e/config-reloader/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.431956 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_5af19bd0-65c2-4a9b-9129-13c8d890652e/prometheus/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.609497 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_6296c02b-df73-4079-99ee-a7e761047e7f/setup-container/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.845577 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_6296c02b-df73-4079-99ee-a7e761047e7f/setup-container/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.875808 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2/setup-container/0.log" Jan 29 13:25:51 crc kubenswrapper[4852]: I0129 13:25:51.945744 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_6296c02b-df73-4079-99ee-a7e761047e7f/rabbitmq/0.log" Jan 29 13:25:52 crc kubenswrapper[4852]: I0129 13:25:52.190034 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2/setup-container/0.log" Jan 29 13:25:52 crc kubenswrapper[4852]: I0129 13:25:52.209187 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-p8ffg_02fc8a61-1749-4d19-962f-daaa15a078f6/reboot-os-openstack-openstack-cell1/0.log" Jan 29 13:25:52 crc kubenswrapper[4852]: I0129 13:25:52.398000 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-sqrzf_7f8604b9-a876-4028-a7a1-edccc04598b7/run-os-openstack-openstack-cell1/0.log" Jan 29 13:25:52 crc kubenswrapper[4852]: I0129 13:25:52.492755 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_eb71bdfc-e30e-4843-bc11-ed203c2f82f0/memcached/0.log" Jan 29 13:25:52 crc kubenswrapper[4852]: I0129 13:25:52.696256 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-x6l6b_d7e6ad57-a997-4ce9-989d-c5df67c43fb5/ssh-known-hosts-openstack/0.log" Jan 29 13:25:53 crc kubenswrapper[4852]: I0129 13:25:53.136768 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-82xj9_e7651f11-224f-4da6-92f6-fc6322e79044/telemetry-openstack-openstack-cell1/0.log" Jan 29 13:25:53 crc kubenswrapper[4852]: I0129 13:25:53.290271 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-9ds4h_6a211413-55d7-4f37-a2ea-c452ecba4bcc/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Jan 29 13:25:53 crc kubenswrapper[4852]: I0129 13:25:53.351400 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-kttsz_3d134483-206b-459e-86b2-5892ef691b64/validate-network-openstack-openstack-cell1/0.log" Jan 29 13:25:53 crc kubenswrapper[4852]: I0129 13:25:53.381576 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_571a0e86-c1cc-4d8a-b70f-c6a42e4eaeb2/rabbitmq/0.log" Jan 29 13:25:57 crc kubenswrapper[4852]: I0129 13:25:57.464624 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:25:57 crc kubenswrapper[4852]: E0129 13:25:57.467525 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:26:08 crc kubenswrapper[4852]: I0129 13:26:08.463529 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:26:08 crc kubenswrapper[4852]: E0129 13:26:08.464349 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.280746 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/util/0.log" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.475505 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/pull/0.log" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.481036 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/util/0.log" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.499384 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/pull/0.log" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.677822 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/util/0.log" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.697932 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/extract/0.log" Jan 29 13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.704210 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_39e08c2cf21a42391401dc44b527dbd42b232d26fd4c253ad41cafaa866tdvq_08fd0cfa-ee12-49db-b31c-3bef3d85ee32/pull/0.log" Jan 29 
13:26:18 crc kubenswrapper[4852]: I0129 13:26:18.988047 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8d874c8fc-pz8mg_0df531fa-b2f4-4122-b8e0-25fd0fb8df7b/manager/0.log" Jan 29 13:26:19 crc kubenswrapper[4852]: I0129 13:26:19.031728 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b6c4d8c5f-rqdx7_0fb78196-264f-44ee-b16b-4a26e4317789/manager/0.log" Jan 29 13:26:19 crc kubenswrapper[4852]: I0129 13:26:19.125248 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d9697b7f4-jgnnf_690fdb1f-20fe-4c9b-9297-7a4e3f623351/manager/0.log" Jan 29 13:26:19 crc kubenswrapper[4852]: I0129 13:26:19.520055 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69d6db494d-lg5cr_d67255eb-825c-46cc-9deb-8b82ef97a888/manager/0.log" Jan 29 13:26:19 crc kubenswrapper[4852]: I0129 13:26:19.570047 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8886f4c47-z7mxp_a7118d7b-d09a-405d-83f9-558e8d5895e1/manager/0.log" Jan 29 13:26:19 crc kubenswrapper[4852]: I0129 13:26:19.884284 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-7wjg2_01017d98-2eab-44db-8683-8a4ddd8f506f/manager/0.log" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.162270 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5f4b8bd54d-db6p9_53942d52-1e48-4496-bc9b-118126410877/manager/0.log" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.417293 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-9c8t7_1b860bbd-067b-42e5-9c41-78cc915a0a4f/manager/0.log" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.437637 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7dd968899f-h67ft_f3f0b5e6-566b-4387-9c99-9b825afc6eec/manager/0.log" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.463166 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:26:20 crc kubenswrapper[4852]: E0129 13:26:20.463708 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.511556 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-84f48565d4-9cqpx_dab24f31-d338-430f-b891-680412052eb8/manager/0.log" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.663486 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-74mf9_a046a54a-a5c0-4807-9048-76f8513e916d/manager/0.log" Jan 29 13:26:20 crc kubenswrapper[4852]: I0129 13:26:20.786719 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-585dbc889-vrw88_3920c83a-21ab-417c-9d20-fac48cd65803/manager/0.log" Jan 29 13:26:21 crc kubenswrapper[4852]: I0129 13:26:21.040553 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-55bff696bd-5rr5x_d32dc864-8a2e-429d-ada3-55137a72ea98/manager/0.log" Jan 29 13:26:21 crc kubenswrapper[4852]: I0129 13:26:21.059425 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6687f8d877-bdlbj_bbdd4422-19ba-4b56-80d7-eb06aba3bab3/manager/0.log" Jan 29 13:26:21 crc kubenswrapper[4852]: I0129 13:26:21.180628 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-59c4b45c4dptjbc_b6874ffc-31d8-431e-8792-7bcb511ed0fe/manager/0.log" Jan 29 13:26:21 crc kubenswrapper[4852]: I0129 13:26:21.379149 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-78c665cf5d-n85lv_69f26544-96bf-4082-af1f-1210db1f56b2/operator/0.log" Jan 29 13:26:22 crc kubenswrapper[4852]: I0129 13:26:22.063549 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-ll8cd_e03a92f3-62d5-4d3c-ae9d-dc09f11644b3/registry-server/0.log" Jan 29 13:26:22 crc kubenswrapper[4852]: I0129 13:26:22.448868 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-n75l6_17656c45-80cf-44cd-92a5-3b4c90e16e02/manager/0.log" Jan 29 13:26:22 crc kubenswrapper[4852]: I0129 13:26:22.489453 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-5cp9z_37dab4de-5de7-41fc-9e1b-0b586a34f190/manager/0.log" Jan 29 13:26:22 crc kubenswrapper[4852]: I0129 13:26:22.653748 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-ddd7p_b62831eb-f626-40b6-b332-1fef36357275/operator/0.log" Jan 29 13:26:22 crc kubenswrapper[4852]: I0129 13:26:22.844300 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68fc8c869-5xcj7_55348ba6-6217-4dc4-99c6-9a7521bddb93/manager/0.log" Jan 29 13:26:23 crc kubenswrapper[4852]: I0129 13:26:23.061836 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-fwfc2_1f32b855-3964-4f2b-b958-57789ebc722a/manager/0.log" Jan 29 13:26:23 crc kubenswrapper[4852]: I0129 13:26:23.097622 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-64b5b76f97-h5rtr_c28733de-04a8-4eca-a143-cc129e122b7b/manager/0.log" Jan 29 13:26:23 crc kubenswrapper[4852]: I0129 13:26:23.189224 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-564965969-js7vz_fe66ce58-71f6-409f-a714-563da2885d40/manager/0.log" Jan 29 13:26:24 crc kubenswrapper[4852]: I0129 13:26:24.463687 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-58857b9c54-qhkm5_8950e863-95ea-40e3-a812-9df855499f77/manager/0.log" Jan 29 13:26:35 crc kubenswrapper[4852]: I0129 13:26:35.463391 4852 scope.go:117] "RemoveContainer" 
containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:26:35 crc kubenswrapper[4852]: E0129 13:26:35.464295 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:26:44 crc kubenswrapper[4852]: I0129 13:26:44.482651 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-99tmr_53eb6e2d-e130-4ef0-8242-d429c1cf2be0/control-plane-machine-set-operator/0.log" Jan 29 13:26:44 crc kubenswrapper[4852]: I0129 13:26:44.657960 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-84ffc_014973b5-c724-49a0-ab5d-2a1a80328f4e/kube-rbac-proxy/0.log" Jan 29 13:26:44 crc kubenswrapper[4852]: I0129 13:26:44.729381 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-84ffc_014973b5-c724-49a0-ab5d-2a1a80328f4e/machine-api-operator/0.log" Jan 29 13:26:50 crc kubenswrapper[4852]: I0129 13:26:50.465176 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:26:50 crc kubenswrapper[4852]: E0129 13:26:50.465775 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:26:58 crc kubenswrapper[4852]: I0129 13:26:58.442436 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-545d4d4674-wr726_edbcdb0c-875a-4c47-b2d0-d98d5d9b13b0/cert-manager-controller/0.log" Jan 29 13:26:58 crc kubenswrapper[4852]: I0129 13:26:58.610386 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-5545bd876-9cvtz_b5435b15-3424-42c4-9d63-d3f37d3226f4/cert-manager-cainjector/0.log" Jan 29 13:26:58 crc kubenswrapper[4852]: I0129 13:26:58.776829 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-6888856db4-w72np_8379fbbf-af14-42ac-af6e-6d08d56fd6c6/cert-manager-webhook/0.log" Jan 29 13:27:02 crc kubenswrapper[4852]: I0129 13:27:02.464102 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:27:02 crc kubenswrapper[4852]: E0129 13:27:02.465020 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:27:13 crc kubenswrapper[4852]: I0129 13:27:13.472303 4852 scope.go:117] "RemoveContainer" 
containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:27:13 crc kubenswrapper[4852]: E0129 13:27:13.473194 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:27:14 crc kubenswrapper[4852]: I0129 13:27:14.413130 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-wpgfj_6ca6b7d4-7a27-4561-886b-045873d5a78a/nmstate-console-plugin/0.log" Jan 29 13:27:14 crc kubenswrapper[4852]: I0129 13:27:14.642384 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-bwjjq_b978ce05-e091-4dc5-814d-47378f31ea22/nmstate-handler/0.log" Jan 29 13:27:14 crc kubenswrapper[4852]: I0129 13:27:14.701657 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-zp86n_bdf996a1-0482-4e91-8a82-99a9319a4711/kube-rbac-proxy/0.log" Jan 29 13:27:14 crc kubenswrapper[4852]: I0129 13:27:14.764572 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-zp86n_bdf996a1-0482-4e91-8a82-99a9319a4711/nmstate-metrics/0.log" Jan 29 13:27:14 crc kubenswrapper[4852]: I0129 13:27:14.883450 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-9rpdb_1453c2c3-5de9-4ea5-91e1-d19133f4877d/nmstate-operator/0.log" Jan 29 13:27:14 crc kubenswrapper[4852]: I0129 13:27:14.992440 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-ldknj_bbdd7c60-4fcc-41b6-ac93-7e04418eb1b0/nmstate-webhook/0.log" Jan 29 13:27:27 crc kubenswrapper[4852]: I0129 13:27:27.463375 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:27:27 crc kubenswrapper[4852]: E0129 13:27:27.464141 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:27:30 crc kubenswrapper[4852]: I0129 13:27:30.954019 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-wxsg7_a9853c72-f7e7-4ece-b626-e7d5a6fdbef1/prometheus-operator/0.log" Jan 29 13:27:31 crc kubenswrapper[4852]: I0129 13:27:31.266687 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6448976d6-pjngj_d72564b9-92ba-48b1-ac14-3f7d0c257191/prometheus-operator-admission-webhook/0.log" Jan 29 13:27:31 crc kubenswrapper[4852]: I0129 13:27:31.537313 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6448976d6-z4bpj_a34c5dcb-da69-43ae-9e4b-42a942b3cf40/prometheus-operator-admission-webhook/0.log" Jan 29 13:27:31 crc kubenswrapper[4852]: I0129 
13:27:31.618525 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-d85kb_4418d202-0abd-48f2-8216-49462f1f5e1f/operator/0.log" Jan 29 13:27:31 crc kubenswrapper[4852]: I0129 13:27:31.802207 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-2p5pq_db57a9e8-3292-4515-a24d-244418fc98ba/perses-operator/0.log" Jan 29 13:27:39 crc kubenswrapper[4852]: I0129 13:27:39.463686 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:27:39 crc kubenswrapper[4852]: E0129 13:27:39.464651 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.054519 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mtg5b_bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d/kube-rbac-proxy/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.182294 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-frr-files/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.475371 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mtg5b_bb398b75-6d13-4c14-ab52-0cf2c6a4eb7d/controller/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.573873 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-reloader/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.617242 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-metrics/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.699269 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-frr-files/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.760655 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-reloader/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.913802 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-frr-files/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.954993 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-reloader/0.log" Jan 29 13:27:48 crc kubenswrapper[4852]: I0129 13:27:48.991659 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-metrics/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.030832 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-metrics/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.247068 4852 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-frr-files/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.253204 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-metrics/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.256661 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/controller/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.274286 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/cp-reloader/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.427110 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/frr-metrics/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.466013 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/kube-rbac-proxy/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.495509 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/kube-rbac-proxy-frr/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.688754 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/reloader/0.log" Jan 29 13:27:49 crc kubenswrapper[4852]: I0129 13:27:49.972834 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-pz8m8_1fc7e862-7101-4e92-9429-567296738d25/frr-k8s-webhook-server/0.log" Jan 29 13:27:50 crc kubenswrapper[4852]: I0129 13:27:50.247701 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6445fdf5fb-tfjfg_00b7e7c3-bd65-4c98-aff5-4fbd340b4ad9/manager/0.log" Jan 29 13:27:50 crc kubenswrapper[4852]: I0129 13:27:50.333008 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-785876fb9c-fnwz4_2131fae0-adff-4b95-8e86-5915ad516749/webhook-server/0.log" Jan 29 13:27:50 crc kubenswrapper[4852]: I0129 13:27:50.618958 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rtdgk_bd9b41ba-dc2c-415c-b903-b906d8f96078/kube-rbac-proxy/0.log" Jan 29 13:27:51 crc kubenswrapper[4852]: I0129 13:27:51.606371 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rtdgk_bd9b41ba-dc2c-415c-b903-b906d8f96078/speaker/0.log" Jan 29 13:27:52 crc kubenswrapper[4852]: I0129 13:27:52.463187 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:27:52 crc kubenswrapper[4852]: E0129 13:27:52.463827 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:27:52 crc kubenswrapper[4852]: I0129 13:27:52.769063 4852 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rgc7d_fac2b4a6-eb46-4998-bf17-6f7b73b3b43d/frr/0.log" Jan 29 13:28:06 crc kubenswrapper[4852]: I0129 13:28:06.463317 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:28:06 crc kubenswrapper[4852]: E0129 13:28:06.464149 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:28:07 crc kubenswrapper[4852]: I0129 13:28:07.074274 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/util/0.log" Jan 29 13:28:07 crc kubenswrapper[4852]: I0129 13:28:07.260531 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/pull/0.log" Jan 29 13:28:07 crc kubenswrapper[4852]: I0129 13:28:07.274474 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/util/0.log" Jan 29 13:28:07 crc kubenswrapper[4852]: I0129 13:28:07.345486 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/pull/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.152900 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/util/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.200002 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/pull/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.203348 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcmrqpf_ca05a8d8-1629-4d04-9ab9-0da017f65631/extract/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.383070 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/util/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.617688 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/util/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.625604 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/pull/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.637934 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/pull/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.899601 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/util/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.925099 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/pull/0.log" Jan 29 13:28:08 crc kubenswrapper[4852]: I0129 13:28:08.925479 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713cfgfx_e7cd1841-eae4-4fe0-ac76-87a98e2118a9/extract/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.145760 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/util/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.394978 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/pull/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.411493 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/util/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.428626 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/pull/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.638888 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/util/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.662716 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/pull/0.log" Jan 29 13:28:09 crc kubenswrapper[4852]: I0129 13:28:09.694758 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5w92p8_1402be79-7e7e-42ee-8c65-8b8cfa48fd1a/extract/0.log" Jan 29 13:28:10 crc kubenswrapper[4852]: I0129 13:28:10.364352 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/util/0.log" Jan 29 13:28:10 crc kubenswrapper[4852]: I0129 13:28:10.739303 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/pull/0.log" Jan 29 13:28:10 crc kubenswrapper[4852]: I0129 13:28:10.748036 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/pull/0.log" Jan 29 13:28:10 crc kubenswrapper[4852]: I0129 13:28:10.874604 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/util/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.075317 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/extract/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.079178 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/pull/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.188321 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08gttrk_2bc8818d-aaba-4852-8a76-7933da07170d/util/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.369292 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/extract-utilities/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.603729 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/extract-content/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.631598 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/extract-utilities/0.log" Jan 29 13:28:11 crc kubenswrapper[4852]: I0129 13:28:11.633979 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/extract-content/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.105857 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/extract-content/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.134003 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/extract-utilities/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.253077 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/extract-utilities/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.480261 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/extract-utilities/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.491193 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/extract-content/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.561825 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/extract-content/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.777940 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/extract-content/0.log" Jan 29 13:28:12 crc kubenswrapper[4852]: I0129 13:28:12.907745 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/extract-utilities/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.088295 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/extract-utilities/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.104086 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-df8ql_349eef38-2a3f-44a3-b5f1-50582e6c34ca/registry-server/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.274527 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/extract-utilities/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.373156 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/extract-content/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.393555 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ljj87_5d09a2d7-28da-404a-9a31-30364cf716a0/registry-server/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.396189 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/extract-content/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.562451 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/extract-utilities/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.572171 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/extract-content/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.611679 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/extract-utilities/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.797639 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-dvfth_b6ada955-7839-4453-b285-aa8c3f02ef76/registry-server/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.893859 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/extract-utilities/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.935946 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/extract-content/0.log" Jan 29 13:28:13 crc kubenswrapper[4852]: I0129 13:28:13.963207 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/extract-content/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.147876 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/extract-utilities/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.161807 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/extract-content/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.223739 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/extract-utilities/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.447959 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/extract-utilities/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.490395 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/extract-content/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.560431 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/extract-content/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.699222 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/extract-content/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.736376 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/extract-utilities/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.936944 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qt4xt_45f6e7f7-6b36-4deb-969d-fbc2bdeee284/registry-server/0.log" Jan 29 13:28:14 crc kubenswrapper[4852]: I0129 13:28:14.940393 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/extract-utilities/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.121219 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/extract-utilities/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.153792 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/extract-content/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.225239 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/extract-content/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.437193 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/extract-content/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.452189 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/extract-utilities/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.466286 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2lzr_42b49ee7-0bb4-43b0-ba65-2ec3c09c993c/registry-server/0.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.706234 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/1.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.707920 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wqx8_4326595e-3e2b-4ec6-b44f-9e8aa025849f/marketplace-operator/2.log" Jan 29 13:28:15 crc kubenswrapper[4852]: I0129 13:28:15.729741 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/extract-utilities/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.021819 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/extract-utilities/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.048834 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/extract-content/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.085763 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/extract-content/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.169138 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-tn8zl_d4141fa7-4395-49ab-a156-538f6aaa9093/registry-server/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.286983 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/extract-utilities/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.342622 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/extract-content/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.392890 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/extract-utilities/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.666148 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-j5pj7_470d976b-738e-4252-8dfb-30e9b0a5fdbf/registry-server/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.668921 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/extract-content/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.673782 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/extract-content/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.679652 4852 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/extract-utilities/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.886259 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/extract-content/0.log" Jan 29 13:28:16 crc kubenswrapper[4852]: I0129 13:28:16.933813 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/extract-utilities/0.log" Jan 29 13:28:18 crc kubenswrapper[4852]: I0129 13:28:18.008210 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-67jbx_c3dfa5e5-86ea-4834-b97e-8d5831bd2f01/registry-server/0.log" Jan 29 13:28:19 crc kubenswrapper[4852]: I0129 13:28:19.463789 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:28:19 crc kubenswrapper[4852]: E0129 13:28:19.464509 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:28:31 crc kubenswrapper[4852]: I0129 13:28:31.488196 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:28:31 crc kubenswrapper[4852]: E0129 13:28:31.489498 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:28:32 crc kubenswrapper[4852]: I0129 13:28:32.887346 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6448976d6-pjngj_d72564b9-92ba-48b1-ac14-3f7d0c257191/prometheus-operator-admission-webhook/0.log" Jan 29 13:28:32 crc kubenswrapper[4852]: I0129 13:28:32.906191 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-wxsg7_a9853c72-f7e7-4ece-b626-e7d5a6fdbef1/prometheus-operator/0.log" Jan 29 13:28:32 crc kubenswrapper[4852]: I0129 13:28:32.939155 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6448976d6-z4bpj_a34c5dcb-da69-43ae-9e4b-42a942b3cf40/prometheus-operator-admission-webhook/0.log" Jan 29 13:28:33 crc kubenswrapper[4852]: I0129 13:28:33.080226 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-2p5pq_db57a9e8-3292-4515-a24d-244418fc98ba/perses-operator/0.log" Jan 29 13:28:33 crc kubenswrapper[4852]: I0129 13:28:33.129838 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-d85kb_4418d202-0abd-48f2-8216-49462f1f5e1f/operator/0.log" Jan 29 13:28:44 crc kubenswrapper[4852]: I0129 13:28:44.463813 4852 
scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:28:44 crc kubenswrapper[4852]: E0129 13:28:44.464563 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:28:56 crc kubenswrapper[4852]: I0129 13:28:56.463173 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:28:56 crc kubenswrapper[4852]: E0129 13:28:56.465156 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:29:07 crc kubenswrapper[4852]: I0129 13:29:07.026911 4852 scope.go:117] "RemoveContainer" containerID="b2d8eb968fd2aa3456266e22afe726f95b27d9f51050517bd5a2fb05a8e6e778" Jan 29 13:29:11 crc kubenswrapper[4852]: I0129 13:29:11.463959 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:29:11 crc kubenswrapper[4852]: E0129 13:29:11.464702 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:29:24 crc kubenswrapper[4852]: I0129 13:29:24.464034 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:29:24 crc kubenswrapper[4852]: E0129 13:29:24.464948 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:29:38 crc kubenswrapper[4852]: I0129 13:29:38.462784 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:29:38 crc kubenswrapper[4852]: E0129 13:29:38.463921 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.723648 4852 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-czjhx"] Jan 29 13:29:45 crc kubenswrapper[4852]: E0129 13:29:45.725308 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="extract-utilities" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.725391 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="extract-utilities" Jan 29 13:29:45 crc kubenswrapper[4852]: E0129 13:29:45.725461 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="extract-content" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.725519 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="extract-content" Jan 29 13:29:45 crc kubenswrapper[4852]: E0129 13:29:45.725623 4852 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="registry-server" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.725684 4852 state_mem.go:107] "Deleted CPUSet assignment" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="registry-server" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.725937 4852 memory_manager.go:354] "RemoveStaleState removing state" podUID="b69be642-e330-43a2-bd81-2358e0d2369a" containerName="registry-server" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.727547 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.762657 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-czjhx"] Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.846914 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q22mh\" (UniqueName: \"kubernetes.io/projected/2638e0f3-6473-4a49-bfa0-0aba72ed5738-kube-api-access-q22mh\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.847004 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-utilities\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.847230 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-catalog-content\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.949377 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-utilities\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.949478 
4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-catalog-content\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.949660 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q22mh\" (UniqueName: \"kubernetes.io/projected/2638e0f3-6473-4a49-bfa0-0aba72ed5738-kube-api-access-q22mh\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.950165 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-utilities\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.950185 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-catalog-content\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:45 crc kubenswrapper[4852]: I0129 13:29:45.975752 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q22mh\" (UniqueName: \"kubernetes.io/projected/2638e0f3-6473-4a49-bfa0-0aba72ed5738-kube-api-access-q22mh\") pod \"redhat-marketplace-czjhx\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:46 crc kubenswrapper[4852]: I0129 13:29:46.056273 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:46 crc kubenswrapper[4852]: I0129 13:29:46.620921 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-czjhx"] Jan 29 13:29:46 crc kubenswrapper[4852]: W0129 13:29:46.624306 4852 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2638e0f3_6473_4a49_bfa0_0aba72ed5738.slice/crio-098c61c6c2f539487e1c164f7b3a594820dfc98936665d507850b0a362dcabda WatchSource:0}: Error finding container 098c61c6c2f539487e1c164f7b3a594820dfc98936665d507850b0a362dcabda: Status 404 returned error can't find the container with id 098c61c6c2f539487e1c164f7b3a594820dfc98936665d507850b0a362dcabda Jan 29 13:29:46 crc kubenswrapper[4852]: I0129 13:29:46.739410 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerStarted","Data":"098c61c6c2f539487e1c164f7b3a594820dfc98936665d507850b0a362dcabda"} Jan 29 13:29:47 crc kubenswrapper[4852]: I0129 13:29:47.755959 4852 generic.go:334] "Generic (PLEG): container finished" podID="2638e0f3-6473-4a49-bfa0-0aba72ed5738" containerID="049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f" exitCode=0 Jan 29 13:29:47 crc kubenswrapper[4852]: I0129 13:29:47.756100 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerDied","Data":"049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f"} Jan 29 13:29:47 crc kubenswrapper[4852]: I0129 13:29:47.760764 4852 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:29:49 crc kubenswrapper[4852]: I0129 13:29:49.780052 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerStarted","Data":"56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837"} Jan 29 13:29:50 crc kubenswrapper[4852]: I0129 13:29:50.790452 4852 generic.go:334] "Generic (PLEG): container finished" podID="2638e0f3-6473-4a49-bfa0-0aba72ed5738" containerID="56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837" exitCode=0 Jan 29 13:29:50 crc kubenswrapper[4852]: I0129 13:29:50.790503 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerDied","Data":"56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837"} Jan 29 13:29:52 crc kubenswrapper[4852]: I0129 13:29:52.816337 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerStarted","Data":"4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf"} Jan 29 13:29:52 crc kubenswrapper[4852]: I0129 13:29:52.853569 4852 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-czjhx" podStartSLOduration=4.295370225 podStartE2EDuration="7.853552373s" podCreationTimestamp="2026-01-29 13:29:45 +0000 UTC" firstStartedPulling="2026-01-29 13:29:47.760542234 +0000 UTC m=+10084.977873358" lastFinishedPulling="2026-01-29 13:29:51.318724372 +0000 UTC m=+10088.536055506" 
observedRunningTime="2026-01-29 13:29:52.844042691 +0000 UTC m=+10090.061373825" watchObservedRunningTime="2026-01-29 13:29:52.853552373 +0000 UTC m=+10090.070883507" Jan 29 13:29:53 crc kubenswrapper[4852]: I0129 13:29:53.472252 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:29:53 crc kubenswrapper[4852]: E0129 13:29:53.472893 4852 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zdz6d_openshift-machine-config-operator(23a48459-954c-4e1a-bd79-bc6018bc255f)\"" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" Jan 29 13:29:56 crc kubenswrapper[4852]: I0129 13:29:56.057200 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:56 crc kubenswrapper[4852]: I0129 13:29:56.057905 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:29:56 crc kubenswrapper[4852]: I0129 13:29:56.122958 4852 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.155217 4852 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8"] Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.158228 4852 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.160824 4852 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.162757 4852 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.176770 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8"] Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.299068 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2020546f-955a-4508-a9f2-55023e978db7-config-volume\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.299596 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2020546f-955a-4508-a9f2-55023e978db7-secret-volume\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.299770 4852 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mkdh\" (UniqueName: 
\"kubernetes.io/projected/2020546f-955a-4508-a9f2-55023e978db7-kube-api-access-6mkdh\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.402341 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2020546f-955a-4508-a9f2-55023e978db7-secret-volume\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.402692 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mkdh\" (UniqueName: \"kubernetes.io/projected/2020546f-955a-4508-a9f2-55023e978db7-kube-api-access-6mkdh\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.403885 4852 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2020546f-955a-4508-a9f2-55023e978db7-config-volume\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.404780 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2020546f-955a-4508-a9f2-55023e978db7-config-volume\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.419555 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mkdh\" (UniqueName: \"kubernetes.io/projected/2020546f-955a-4508-a9f2-55023e978db7-kube-api-access-6mkdh\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.420293 4852 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2020546f-955a-4508-a9f2-55023e978db7-secret-volume\") pod \"collect-profiles-29494890-5mmv8\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.480128 4852 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:00 crc kubenswrapper[4852]: I0129 13:30:00.993692 4852 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8"] Jan 29 13:30:01 crc kubenswrapper[4852]: I0129 13:30:01.953744 4852 generic.go:334] "Generic (PLEG): container finished" podID="2020546f-955a-4508-a9f2-55023e978db7" containerID="64d6dfbabe0c4073796155e43d680b588a058dcf9d17e22595a79ce406045a4d" exitCode=0 Jan 29 13:30:01 crc kubenswrapper[4852]: I0129 13:30:01.953887 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" event={"ID":"2020546f-955a-4508-a9f2-55023e978db7","Type":"ContainerDied","Data":"64d6dfbabe0c4073796155e43d680b588a058dcf9d17e22595a79ce406045a4d"} Jan 29 13:30:01 crc kubenswrapper[4852]: I0129 13:30:01.954272 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" event={"ID":"2020546f-955a-4508-a9f2-55023e978db7","Type":"ContainerStarted","Data":"2dee12aa86adcdb2d4aa4db21a302a1ebc298eb8dfee973e41b9ce3770c84e1f"} Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.206315 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.296942 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2020546f-955a-4508-a9f2-55023e978db7-secret-volume\") pod \"2020546f-955a-4508-a9f2-55023e978db7\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.297043 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2020546f-955a-4508-a9f2-55023e978db7-config-volume\") pod \"2020546f-955a-4508-a9f2-55023e978db7\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.297155 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mkdh\" (UniqueName: \"kubernetes.io/projected/2020546f-955a-4508-a9f2-55023e978db7-kube-api-access-6mkdh\") pod \"2020546f-955a-4508-a9f2-55023e978db7\" (UID: \"2020546f-955a-4508-a9f2-55023e978db7\") " Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.297981 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2020546f-955a-4508-a9f2-55023e978db7-config-volume" (OuterVolumeSpecName: "config-volume") pod "2020546f-955a-4508-a9f2-55023e978db7" (UID: "2020546f-955a-4508-a9f2-55023e978db7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.302769 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2020546f-955a-4508-a9f2-55023e978db7-kube-api-access-6mkdh" (OuterVolumeSpecName: "kube-api-access-6mkdh") pod "2020546f-955a-4508-a9f2-55023e978db7" (UID: "2020546f-955a-4508-a9f2-55023e978db7"). InnerVolumeSpecName "kube-api-access-6mkdh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.307762 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2020546f-955a-4508-a9f2-55023e978db7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2020546f-955a-4508-a9f2-55023e978db7" (UID: "2020546f-955a-4508-a9f2-55023e978db7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.400465 4852 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2020546f-955a-4508-a9f2-55023e978db7-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.400516 4852 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2020546f-955a-4508-a9f2-55023e978db7-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.400531 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mkdh\" (UniqueName: \"kubernetes.io/projected/2020546f-955a-4508-a9f2-55023e978db7-kube-api-access-6mkdh\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.986683 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" event={"ID":"2020546f-955a-4508-a9f2-55023e978db7","Type":"ContainerDied","Data":"2dee12aa86adcdb2d4aa4db21a302a1ebc298eb8dfee973e41b9ce3770c84e1f"} Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.986937 4852 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2dee12aa86adcdb2d4aa4db21a302a1ebc298eb8dfee973e41b9ce3770c84e1f" Jan 29 13:30:04 crc kubenswrapper[4852]: I0129 13:30:04.986767 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-5mmv8" Jan 29 13:30:05 crc kubenswrapper[4852]: I0129 13:30:05.294633 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n"] Jan 29 13:30:05 crc kubenswrapper[4852]: I0129 13:30:05.304165 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-8md5n"] Jan 29 13:30:05 crc kubenswrapper[4852]: I0129 13:30:05.474538 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a5e857d-921e-47da-8038-1b71b80384d5" path="/var/lib/kubelet/pods/5a5e857d-921e-47da-8038-1b71b80384d5/volumes" Jan 29 13:30:06 crc kubenswrapper[4852]: I0129 13:30:06.462870 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa" Jan 29 13:30:06 crc kubenswrapper[4852]: I0129 13:30:06.623063 4852 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:30:06 crc kubenswrapper[4852]: I0129 13:30:06.685341 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-czjhx"] Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.018904 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"c6b8a277ecd1e92e566c2f1e5ad98d4b891aa45ef764008d89cfe1e4e85cb73b"} Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.019094 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-czjhx" podUID="2638e0f3-6473-4a49-bfa0-0aba72ed5738" containerName="registry-server" containerID="cri-o://4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf" gracePeriod=2 Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.098267 4852 scope.go:117] "RemoveContainer" containerID="e2bf98532ab48518cde3cd02e5ddd69aad9df58fa5a1f35c2b8352c74dca0702" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.225398 4852 scope.go:117] "RemoveContainer" containerID="4ae149e499e8e0c822a974b46632a0f2dbfffc75478f7b89ca810806259f9d1d" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.279392 4852 scope.go:117] "RemoveContainer" containerID="564b860e992e478b245b3348090df6684fb5ca7faa2d066f34c0845016a77086" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.535543 4852 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.584824 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q22mh\" (UniqueName: \"kubernetes.io/projected/2638e0f3-6473-4a49-bfa0-0aba72ed5738-kube-api-access-q22mh\") pod \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.585044 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-utilities\") pod \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.585088 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-catalog-content\") pod \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\" (UID: \"2638e0f3-6473-4a49-bfa0-0aba72ed5738\") " Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.589017 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-utilities" (OuterVolumeSpecName: "utilities") pod "2638e0f3-6473-4a49-bfa0-0aba72ed5738" (UID: "2638e0f3-6473-4a49-bfa0-0aba72ed5738"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.617137 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2638e0f3-6473-4a49-bfa0-0aba72ed5738-kube-api-access-q22mh" (OuterVolumeSpecName: "kube-api-access-q22mh") pod "2638e0f3-6473-4a49-bfa0-0aba72ed5738" (UID: "2638e0f3-6473-4a49-bfa0-0aba72ed5738"). InnerVolumeSpecName "kube-api-access-q22mh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.618865 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2638e0f3-6473-4a49-bfa0-0aba72ed5738" (UID: "2638e0f3-6473-4a49-bfa0-0aba72ed5738"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.688611 4852 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.688655 4852 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2638e0f3-6473-4a49-bfa0-0aba72ed5738-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:07 crc kubenswrapper[4852]: I0129 13:30:07.688672 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q22mh\" (UniqueName: \"kubernetes.io/projected/2638e0f3-6473-4a49-bfa0-0aba72ed5738-kube-api-access-q22mh\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.035281 4852 generic.go:334] "Generic (PLEG): container finished" podID="2638e0f3-6473-4a49-bfa0-0aba72ed5738" containerID="4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf" exitCode=0 Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.035339 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerDied","Data":"4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf"} Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.035358 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-czjhx" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.035387 4852 scope.go:117] "RemoveContainer" containerID="4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.035373 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-czjhx" event={"ID":"2638e0f3-6473-4a49-bfa0-0aba72ed5738","Type":"ContainerDied","Data":"098c61c6c2f539487e1c164f7b3a594820dfc98936665d507850b0a362dcabda"} Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.062793 4852 scope.go:117] "RemoveContainer" containerID="56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.072958 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-czjhx"] Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.084558 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-czjhx"] Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.095904 4852 scope.go:117] "RemoveContainer" containerID="049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.145636 4852 scope.go:117] "RemoveContainer" containerID="4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf" Jan 29 13:30:08 crc kubenswrapper[4852]: E0129 13:30:08.146035 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf\": container with ID starting with 4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf not found: ID does not exist" containerID="4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.146066 4852 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf"} err="failed to get container status \"4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf\": rpc error: code = NotFound desc = could not find container \"4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf\": container with ID starting with 4eee27d3c05ea4406b914f4b9d7da86eca883e48cc7ebb5082a3ce9a73b3dabf not found: ID does not exist" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.146086 4852 scope.go:117] "RemoveContainer" containerID="56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837" Jan 29 13:30:08 crc kubenswrapper[4852]: E0129 13:30:08.146362 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837\": container with ID starting with 56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837 not found: ID does not exist" containerID="56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.146392 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837"} err="failed to get container status \"56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837\": rpc error: code = NotFound desc = could not find container \"56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837\": container with ID starting with 56ae228a4fa97727d6dbabae53de29791a44f02a3e9857c65208da6d3ee67837 not found: ID does not exist" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.146411 4852 scope.go:117] "RemoveContainer" containerID="049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f" Jan 29 13:30:08 crc kubenswrapper[4852]: E0129 13:30:08.146698 4852 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f\": container with ID starting with 049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f not found: ID does not exist" containerID="049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f" Jan 29 13:30:08 crc kubenswrapper[4852]: I0129 13:30:08.146721 4852 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f"} err="failed to get container status \"049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f\": rpc error: code = NotFound desc = could not find container \"049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f\": container with ID starting with 049cfb167b9bd17afefb4c6e7ef0f9046586d38568315538b5ada0cb17b3ae5f not found: ID does not exist" Jan 29 13:30:09 crc kubenswrapper[4852]: I0129 13:30:09.476517 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2638e0f3-6473-4a49-bfa0-0aba72ed5738" path="/var/lib/kubelet/pods/2638e0f3-6473-4a49-bfa0-0aba72ed5738/volumes" Jan 29 13:30:53 crc kubenswrapper[4852]: I0129 13:30:53.539206 4852 generic.go:334] "Generic (PLEG): container finished" podID="de6244cd-ee28-45e9-92f8-03b9dbbd3417" containerID="df8eef2d2c445b16716fd7d1fe702d7ee220b0ea68bf63c512ba59564a585acc" exitCode=0 Jan 29 13:30:53 crc kubenswrapper[4852]: I0129 
13:30:53.539292 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" event={"ID":"de6244cd-ee28-45e9-92f8-03b9dbbd3417","Type":"ContainerDied","Data":"df8eef2d2c445b16716fd7d1fe702d7ee220b0ea68bf63c512ba59564a585acc"} Jan 29 13:30:53 crc kubenswrapper[4852]: I0129 13:30:53.541500 4852 scope.go:117] "RemoveContainer" containerID="df8eef2d2c445b16716fd7d1fe702d7ee220b0ea68bf63c512ba59564a585acc" Jan 29 13:30:53 crc kubenswrapper[4852]: I0129 13:30:53.963101 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w4bvc_must-gather-zdtbg_de6244cd-ee28-45e9-92f8-03b9dbbd3417/gather/0.log" Jan 29 13:31:04 crc kubenswrapper[4852]: I0129 13:31:04.263468 4852 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w4bvc/must-gather-zdtbg"] Jan 29 13:31:04 crc kubenswrapper[4852]: I0129 13:31:04.264445 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" podUID="de6244cd-ee28-45e9-92f8-03b9dbbd3417" containerName="copy" containerID="cri-o://da0eaa4369b00b83a661c988d1b15e49cdc8978942231d54c22ea7be2db8540f" gracePeriod=2 Jan 29 13:31:04 crc kubenswrapper[4852]: I0129 13:31:04.277040 4852 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w4bvc/must-gather-zdtbg"] Jan 29 13:31:04 crc kubenswrapper[4852]: I0129 13:31:04.655236 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w4bvc_must-gather-zdtbg_de6244cd-ee28-45e9-92f8-03b9dbbd3417/copy/0.log" Jan 29 13:31:04 crc kubenswrapper[4852]: I0129 13:31:04.656105 4852 generic.go:334] "Generic (PLEG): container finished" podID="de6244cd-ee28-45e9-92f8-03b9dbbd3417" containerID="da0eaa4369b00b83a661c988d1b15e49cdc8978942231d54c22ea7be2db8540f" exitCode=143 Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.501808 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w4bvc_must-gather-zdtbg_de6244cd-ee28-45e9-92f8-03b9dbbd3417/copy/0.log" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.502917 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.649184 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85bxz\" (UniqueName: \"kubernetes.io/projected/de6244cd-ee28-45e9-92f8-03b9dbbd3417-kube-api-access-85bxz\") pod \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.649603 4852 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de6244cd-ee28-45e9-92f8-03b9dbbd3417-must-gather-output\") pod \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\" (UID: \"de6244cd-ee28-45e9-92f8-03b9dbbd3417\") " Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.655331 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de6244cd-ee28-45e9-92f8-03b9dbbd3417-kube-api-access-85bxz" (OuterVolumeSpecName: "kube-api-access-85bxz") pod "de6244cd-ee28-45e9-92f8-03b9dbbd3417" (UID: "de6244cd-ee28-45e9-92f8-03b9dbbd3417"). InnerVolumeSpecName "kube-api-access-85bxz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.669919 4852 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w4bvc_must-gather-zdtbg_de6244cd-ee28-45e9-92f8-03b9dbbd3417/copy/0.log" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.670461 4852 scope.go:117] "RemoveContainer" containerID="da0eaa4369b00b83a661c988d1b15e49cdc8978942231d54c22ea7be2db8540f" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.670549 4852 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w4bvc/must-gather-zdtbg" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.724051 4852 scope.go:117] "RemoveContainer" containerID="df8eef2d2c445b16716fd7d1fe702d7ee220b0ea68bf63c512ba59564a585acc" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.752790 4852 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85bxz\" (UniqueName: \"kubernetes.io/projected/de6244cd-ee28-45e9-92f8-03b9dbbd3417-kube-api-access-85bxz\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.868727 4852 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de6244cd-ee28-45e9-92f8-03b9dbbd3417-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "de6244cd-ee28-45e9-92f8-03b9dbbd3417" (UID: "de6244cd-ee28-45e9-92f8-03b9dbbd3417"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:31:05 crc kubenswrapper[4852]: I0129 13:31:05.957234 4852 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de6244cd-ee28-45e9-92f8-03b9dbbd3417-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:07 crc kubenswrapper[4852]: I0129 13:31:07.488012 4852 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de6244cd-ee28-45e9-92f8-03b9dbbd3417" path="/var/lib/kubelet/pods/de6244cd-ee28-45e9-92f8-03b9dbbd3417/volumes" Jan 29 13:32:30 crc kubenswrapper[4852]: I0129 13:32:30.016642 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:32:30 crc kubenswrapper[4852]: I0129 13:32:30.017173 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:33:00 crc kubenswrapper[4852]: I0129 13:33:00.017654 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:33:00 crc kubenswrapper[4852]: I0129 13:33:00.018492 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.017391 4852 patch_prober.go:28] interesting pod/machine-config-daemon-zdz6d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.018007 4852 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.018078 4852 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d"
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.019025 4852 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c6b8a277ecd1e92e566c2f1e5ad98d4b891aa45ef764008d89cfe1e4e85cb73b"} pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.019091 4852 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" podUID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerName="machine-config-daemon" containerID="cri-o://c6b8a277ecd1e92e566c2f1e5ad98d4b891aa45ef764008d89cfe1e4e85cb73b" gracePeriod=600
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.202212 4852 generic.go:334] "Generic (PLEG): container finished" podID="23a48459-954c-4e1a-bd79-bc6018bc255f" containerID="c6b8a277ecd1e92e566c2f1e5ad98d4b891aa45ef764008d89cfe1e4e85cb73b" exitCode=0
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.202272 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerDied","Data":"c6b8a277ecd1e92e566c2f1e5ad98d4b891aa45ef764008d89cfe1e4e85cb73b"}
Jan 29 13:33:30 crc kubenswrapper[4852]: I0129 13:33:30.202328 4852 scope.go:117] "RemoveContainer" containerID="6b3cea23ea8d1cbe0484a3ca4ea634e736f324b5eb67fd0fa1495e432d3278fa"
Jan 29 13:33:31 crc kubenswrapper[4852]: I0129 13:33:31.227276 4852 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zdz6d" event={"ID":"23a48459-954c-4e1a-bd79-bc6018bc255f","Type":"ContainerStarted","Data":"a15832dff688c9af5eb84580e0814baf4ca536a8078c96e37c7faf78f9fe2935"}
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136660717017376 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015136634165016517 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015136634165015467 5ustar corecore